1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* This program is free software; you can redistribute it and/or */
6 /* modify it under the terms of the GNU Library General Public License */
7 /* as published by the Free Software Foundation; either version 2 */
8 /* of the License, or (at your option) any later version. */
10 /* This program is distributed in the hope that it will be useful, */
11 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13 /* GNU Library General Public License for more details. */
15 /* Thread creation, initialization, and basic low-level routines */
21 #include <netdb.h> /* for h_errno */
29 #include <sys/resource.h>
31 #include "internals.h"
34 #include "debug.h" /* added to linuxthreads -StS */
37 /* Mods for uClibc: Some includes */
39 #include <sys/types.h>
40 #include <sys/syscall.h>
42 /* mods for uClibc: getpwd and getpagesize are the syscalls */
43 #define __getpid getpid
44 #define __getpagesize getpagesize
45 /* mods for uClibc: __libc_sigaction is not in any standard headers */
46 extern int __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact);
49 /* These variables are used by the setup code. */
54 /* Descriptor of the initial thread */
/* Statically-initialized descriptor for the initial (main) thread.
   It links itself into the circular live-thread list via
   p_nextlive/p_prevlive, and p_self points back at itself.  p_pid and
   the errno/h_errno pointers are (re)filled in by pthread_initialize().
   NOTE(review): some members of the original initializer are not visible
   in this extract; the visible entries are reproduced unchanged. */
56 struct _pthread_descr_struct __pthread_initial_thread = {
57 &__pthread_initial_thread, /* pthread_descr p_nextlive */
58 &__pthread_initial_thread, /* pthread_descr p_prevlive */
59 NULL, /* pthread_descr p_nextwaiting */
60 NULL, /* pthread_descr p_nextlock */
61 PTHREAD_THREADS_MAX, /* pthread_t p_tid */
63 0, /* int p_priority */
64 &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */
66 NULL, /* sigjmp_buf * p_signal_buf */
67 NULL, /* sigjmp_buf * p_cancel_buf */
68 0, /* char p_terminated */
69 0, /* char p_detached */
70 0, /* char p_exited */
71 NULL, /* void * p_retval */
73 NULL, /* pthread_descr p_joining */
74 NULL, /* struct _pthread_cleanup_buffer * p_cleanup */
75 0, /* char p_cancelstate */
76 0, /* char p_canceltype */
77 0, /* char p_canceled */
78 &_errno, /* int *p_errnop */
80 &_h_errno, /* int *p_h_errnop */
81 0, /* int p_h_errno */
82 NULL, /* char * p_in_sighandler */
83 0, /* char p_sigwaiting */
84 PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */
85 {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
86 {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
87 0, /* int p_userstack */
88 NULL, /* void * p_guardaddr */
89 0, /* size_t p_guardsize */
90 &__pthread_initial_thread, /* pthread_descr p_self */
91 0, /* Always index 0 */
92 0, /* int p_report_events */
93 {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
94 __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
95 0, /* char p_woken_by_cancel */
96 0, /* char p_condvar_avail */
97 0, /* char p_sem_avail */
98 NULL, /* struct pthread_extricate_if *p_extricate */
99 NULL, /* pthread_readlock_info *p_readlock_list; */
100 NULL, /* pthread_readlock_info *p_readlock_free; */
101 0 /* int p_untracked_readlock_count; */
104 /* Descriptor of the manager thread; none of this is used but the error
105 variables, the p_pid and p_priority fields,
106 and the address for identification. */
107 #define manager_thread (&__pthread_manager_thread)
/* Statically-initialized descriptor for the manager thread.  As the
   comment above states, only the error variables, p_pid/p_priority and
   the descriptor's address are actually used.  Uses handle slot 1
   (h_lock) and p_tid index 1; p_pid is filled in once the manager is
   cloned in __pthread_initialize_manager(). */
108 struct _pthread_descr_struct __pthread_manager_thread = {
109 NULL, /* pthread_descr p_nextlive */
110 NULL, /* pthread_descr p_prevlive */
111 NULL, /* pthread_descr p_nextwaiting */
112 NULL, /* pthread_descr p_nextlock */
115 0, /* int p_priority */
116 &__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock */
117 0, /* int p_signal */
118 NULL, /* sigjmp_buf * p_signal_buf */
119 NULL, /* sigjmp_buf * p_cancel_buf */
120 0, /* char p_terminated */
121 0, /* char p_detached */
122 0, /* char p_exited */
123 NULL, /* void * p_retval */
124 0, /* int p_retcode */ /* comment fixed: was a duplicate "p_retval" */
125 NULL, /* pthread_descr p_joining */
126 NULL, /* struct _pthread_cleanup_buffer * p_cleanup */
127 0, /* char p_cancelstate */
128 0, /* char p_canceltype */
129 0, /* char p_canceled */
130 &__pthread_manager_thread.p_errno, /* int *p_errnop */
132 NULL, /* int *p_h_errnop */
133 0, /* int p_h_errno */
134 NULL, /* char * p_in_sighandler */
135 0, /* char p_sigwaiting */
136 PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */
137 {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
138 {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
139 0, /* int p_userstack */
140 NULL, /* void * p_guardaddr */
141 0, /* size_t p_guardsize */
142 &__pthread_manager_thread, /* pthread_descr p_self */
143 1, /* Always index 1 */
144 0, /* int p_report_events */
145 {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
146 __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
147 0, /* char p_woken_by_cancel */
148 0, /* char p_condvar_avail */
149 0, /* char p_sem_avail */
150 NULL, /* struct pthread_extricate_if *p_extricate */
151 NULL, /* pthread_readlock_info *p_readlock_list; */
152 NULL, /* pthread_readlock_info *p_readlock_free; */
153 0 /* int p_untracked_readlock_count; */
156 /* Pointer to the main thread (the father of the thread manager thread) */
157 /* Originally, this is the initial thread, but this changes after fork() */
159 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
161 /* Limit between the stack of the initial thread (above) and the
162 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
164 char *__pthread_initial_thread_bos = NULL;
166 /* For non-MMU systems also remember to stack top of the initial thread.
167 * This is adapted when other stacks are malloc'ed since we don't know
168 * the bounds a-priori. -StS */
170 #ifndef __UCLIBC_HAS_MMU__
171 char *__pthread_initial_thread_tos = NULL;
172 #endif /* __UCLIBC_HAS_MMU__ */
174 /* File descriptor for sending requests to the thread manager. */
175 /* Initially -1, meaning that the thread manager is not running. */
177 int __pthread_manager_request = -1;
179 /* Other end of the pipe for sending requests to the thread manager. */
181 int __pthread_manager_reader;
183 /* Limits of the thread manager stack */
185 char *__pthread_manager_thread_bos = NULL;
186 char *__pthread_manager_thread_tos = NULL;
188 /* For process-wide exit() */
190 int __pthread_exit_requested = 0;
191 int __pthread_exit_code = 0;
193 /* Communicate relevant LinuxThreads constants to gdb */
195 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
196 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
197 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct, h_descr);
198 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
/* NOTE(review): continuation of the offsetof() call (the p_pid member and
   closing ");") is not visible in this extract. */
200 const int __linuxthreads_pthread_sizeof_descr
201 = sizeof(struct _pthread_descr_struct);
/* Set non-zero by the debugger to request TD_CREATE event reporting
   before the manager thread starts (read in __pthread_initialize_manager). */
203 const int __linuxthreads_initial_report_events;
205 const char __linuxthreads_version[] = VERSION;
207 /* Forward declarations */
208 static void pthread_onexit_process(int retcode, void *arg);
209 static void pthread_handle_sigcancel(int sig);
210 static void pthread_handle_sigrestart(int sig);
211 static void pthread_handle_sigdebug(int sig);
212 int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime);
214 /* Signal numbers used for the communication.
215 In these variables we keep track of the used variables. If the
216 platform does not support any real-time signals we will define the
217 values to some unreasonable value which will signal failing of all
218 the functions below. */
/* NOTE(review): three alternative definitions of current_rtmin/current_rtmax
   etc. appear below; they belong to distinct preprocessor branches
   (no-RT-signals, >=3 RT signals available, too-few RT signals) whose
   #else/#endif lines are not visible in this extract. */
219 #ifndef __NR_rt_sigaction
220 static int current_rtmin = -1;
221 static int current_rtmax = -1;
222 int __pthread_sig_restart = SIGUSR1;
223 int __pthread_sig_cancel = SIGUSR2;
224 int __pthread_sig_debug;
227 #if __SIGRTMAX - __SIGRTMIN >= 3
/* First three RT signals are reserved for restart/cancel/debug;
   allocatable range starts after them. */
228 static int current_rtmin = __SIGRTMIN + 3;
229 static int current_rtmax = __SIGRTMAX;
230 int __pthread_sig_restart = __SIGRTMIN;
231 int __pthread_sig_cancel = __SIGRTMIN + 1;
232 int __pthread_sig_debug = __SIGRTMIN + 2;
233 void (*__pthread_restart)(pthread_descr) = __pthread_restart_new;
234 void (*__pthread_suspend)(pthread_descr) = __pthread_wait_for_restart_signal;
235 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_new;
/* Fallback: not enough RT signals for our private use; keep SIGUSR1/2
   and the "old" (counter-based) suspend/restart primitives. */
237 static int current_rtmin = __SIGRTMIN;
238 static int current_rtmax = __SIGRTMAX;
239 int __pthread_sig_restart = SIGUSR1;
240 int __pthread_sig_cancel = SIGUSR2;
241 int __pthread_sig_debug;
242 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
243 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
244 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
248 /* Return number of available real-time signal with highest priority. */
/* Lowest-numbered RT signal still free for allocation, or -1 if the
   platform has none.  No locking: see the comment on
   __libc_allocate_rtsig below. */
249 int __libc_current_sigrtmin (void)
251 return current_rtmin;
254 /* Return number of available real-time signal with lowest priority. */
/* Highest-numbered RT signal still free for allocation, or -1 if the
   platform has none. */
255 int __libc_current_sigrtmax (void)
257 return current_rtmax;
260 /* Allocate real-time signal with highest/lowest available
261 priority. Please note that we don't use a lock since we assume
262 this function to be called at program start. */
263 int __libc_allocate_rtsig (int high)
265 if (current_rtmin == -1 || current_rtmin > current_rtmax)
266 /* We don't have anymore signal available. */
268 return high ? current_rtmin++ : current_rtmax--;
272 /* Initialize the pthread library.
273 Initialization is split in two functions:
274 - a constructor function that blocks the __pthread_sig_restart signal
275 (must do this very early, since the program could capture the signal
276 mask with e.g. sigsetjmp before creating the first thread);
277 - a regular function called from pthread_create when needed. */
279 static void pthread_initialize(void) __attribute__((constructor));
281 /* Do some minimal initialization which has to be done during the
282 startup of the C library. */
/* Minimal early initialization run during C library startup: on ports
   that define INIT_THREAD_SELF, point the thread register at the
   initial thread's descriptor (slot 0).  Everything else waits for
   pthread_initialize(). */
283 void __pthread_initialize_minimal(void)
285 /* If we have special thread_self processing, initialize
286 * that for the main thread now. */
287 #ifdef INIT_THREAD_SELF
288 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
/* One-time library initialization, run as a constructor and again on
   demand from __pthread_initialize_manager(): computes the initial
   thread's stack bounds, fills in its descriptor, installs the
   restart/cancel/debug signal handlers (shared by all threads), blocks
   the restart signal, and registers the process-exit hook.
   NOTE(review): local declarations (struct sigaction sa; sigset_t mask;
   and on MMU builds struct rlimit limit; rlim_t max_stack) are not
   visible in this extract. */
293 static void pthread_initialize(void)
300 /* If already done (e.g. by a constructor called earlier!), bail out */
301 if (__pthread_initial_thread_bos != NULL) return;
302 #ifdef TEST_FOR_COMPARE_AND_SWAP
303 /* Test if compare-and-swap is available */
304 __pthread_has_cas = compare_and_swap_is_available();
306 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
307 below the current stack address, and align that on a
308 STACK_SIZE boundary. */
309 __pthread_initial_thread_bos =
310 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
311 /* Update the descriptor for the initial thread. */
312 __pthread_initial_thread.p_pid = __getpid();
313 /* If we have special thread_self processing, initialize that for the
315 #ifdef INIT_THREAD_SELF
316 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
318 /* The errno/h_errno variable of the main thread are the global ones. */
319 __pthread_initial_thread.p_errnop = &_errno;
320 __pthread_initial_thread.p_h_errnop = &_h_errno;
321 /* Play with the stack size limit to make sure that no stack ever grows
322 beyond STACK_SIZE minus two pages (one page for the thread descriptor
323 immediately beyond, and one page to act as a guard page). */
325 #ifdef __UCLIBC_HAS_MMU__
326 /* We cannot allocate a huge chunk of memory to mmap all thread stacks later
327 * on a non-MMU system. Thus, we don't need the rlimit either. -StS */
328 getrlimit(RLIMIT_STACK, &limit);
329 max_stack = STACK_SIZE - 2 * __getpagesize();
330 if (limit.rlim_cur > max_stack) {
331 limit.rlim_cur = max_stack;
332 setrlimit(RLIMIT_STACK, &limit);
335 /* For non-MMU assume __pthread_initial_thread_tos at upper page boundary, and
336 * __pthread_initial_thread_bos at address 0. These bounds are refined as we
337 * malloc other stack frames such that they don't overlap. -StS
339 __pthread_initial_thread_tos =
340 (char *)(((long)CURRENT_STACK_FRAME + __getpagesize()) & ~(__getpagesize() - 1));
341 __pthread_initial_thread_bos = (char *) 1; /* set it non-zero so we know we have been here */
342 PDEBUG("initial thread stack bounds: bos=%p, tos=%p\n",
343 __pthread_initial_thread_bos, __pthread_initial_thread_tos);
344 #endif /* __UCLIBC_HAS_MMU__ */
346 /* Setup signal handlers for the initial thread.
347 Since signal handlers are shared between threads, these settings
348 will be inherited by all other threads. */
349 sa.sa_handler = pthread_handle_sigrestart;
350 sigemptyset(&sa.sa_mask);
352 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
353 sa.sa_handler = pthread_handle_sigcancel;
355 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
356 if (__pthread_sig_debug > 0) {
357 sa.sa_handler = pthread_handle_sigdebug;
358 sigemptyset(&sa.sa_mask);
360 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
362 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
364 sigaddset(&mask, __pthread_sig_restart);
365 sigprocmask(SIG_BLOCK, &mask, NULL);
366 /* Register an exit function to kill all other threads. */
367 /* Do it early so that user-registered atexit functions are called
368 before pthread_onexit_process. */
369 on_exit(pthread_onexit_process, NULL);
/* Public entry point: force library initialization (idempotent —
   pthread_initialize() bails out if already done). */
372 void __pthread_initialize(void)
374 pthread_initialize();
/* Start the thread manager thread: allocate its stack, create the
   request pipe, clone() the manager (optionally via the event-reporting
   entry point for debuggers), and synchronize with gdb if debugging.
   Returns 0 on success (implied by the tail of the function, not
   visible here), -1 on failure.
   NOTE(review): local declarations (int manager_pipe[2]; int pid;
   int report_events;) and several closing braces/return statements are
   not visible in this extract. */
377 int __pthread_initialize_manager(void)
382 struct pthread_request request;
384 /* If basic initialization not done yet (e.g. we're called from a
385 constructor run before our constructor), do it now */
386 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
387 /* Setup stack for thread manager */
388 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
389 if (__pthread_manager_thread_bos == NULL) return -1;
390 __pthread_manager_thread_tos =
391 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
393 /* On non-MMU systems we make sure that the initial thread bounds don't overlap
394 * with the manager stack frame */
395 NOMMU_INITIAL_THREAD_BOUNDS(__pthread_manager_thread_tos,__pthread_manager_thread_bos);
396 PDEBUG("manager stack: size=%d, bos=%p, tos=%p\n", THREAD_MANAGER_STACK_SIZE,
397 __pthread_manager_thread_bos, __pthread_manager_thread_tos);
399 PDEBUG("initial stack: estimate bos=%p, tos=%p\n",
400 __pthread_initial_thread_bos, __pthread_initial_thread_tos);
403 /* Setup pipe to communicate with thread manager */
404 if (pipe(manager_pipe) == -1) {
405 free(__pthread_manager_thread_bos);
408 /* Start the thread manager */
411 if (__linuxthreads_initial_report_events != 0)
412 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
413 __linuxthreads_initial_report_events);
414 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
416 if (__linuxthreads_initial_report_events != 0)
417 __pthread_initial_thread.p_report_events
418 = __linuxthreads_initial_report_events;
419 report_events = __pthread_initial_thread.p_report_events;
421 if (__builtin_expect (report_events, 0))
423 /* It's a bit more complicated. We have to report the creation of
424 the manager thread. */
425 int idx = __td_eventword (TD_CREATE);
426 uint32_t mask = __td_eventmask (TD_CREATE);
428 if ((mask & (__pthread_threads_events.event_bits[idx]
429 | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]))
/* Lock the manager's descriptor so the debugger event is fully filled
   in before the manager can run. */
433 __pthread_lock(__pthread_manager_thread.p_lock, NULL);
435 pid = clone(__pthread_manager_event,
436 (void **) __pthread_manager_thread_tos,
437 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
438 (void *)(long)manager_pipe[0]);
442 /* Now fill in the information about the new thread in
443 the newly created thread's data structure. We cannot let
444 the new thread do this since we don't know whether it was
445 already scheduled when we send the event. */
446 __pthread_manager_thread.p_eventbuf.eventdata =
447 &__pthread_manager_thread;
448 __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
449 __pthread_last_event = &__pthread_manager_thread;
450 __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
451 __pthread_manager_thread.p_pid = pid;
453 /* Now call the function which signals the event. */
454 __linuxthreads_create_event ();
456 /* Now restart the thread. */
457 __pthread_unlock(__pthread_manager_thread.p_lock);
/* Non-event-reporting path: plain manager entry point. */
462 pid = clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
463 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
464 (void *)(long)manager_pipe[0]);
/* clone() failure cleanup: release stack and both pipe ends. */
467 free(__pthread_manager_thread_bos);
468 __libc_close(manager_pipe[0]);
469 __libc_close(manager_pipe[1]);
472 __pthread_manager_request = manager_pipe[1]; /* writing end */
473 __pthread_manager_reader = manager_pipe[0]; /* reading end */
474 __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
475 __pthread_manager_thread.p_pid = pid;
477 /* Make gdb aware of new thread manager */
478 if (__pthread_threads_debug && __pthread_sig_debug > 0)
480 raise(__pthread_sig_debug);
481 /* We suspend ourself and gdb will wake us up when it is
482 ready to handle us. */
483 __pthread_wait_for_restart_signal(thread_self());
485 /* Synchronize debugging of the thread manager */
486 PDEBUG("send REQ_DEBUG to manager thread\n");
487 request.req_kind = REQ_DEBUG;
488 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
489 (char *) &request, sizeof(request)));
493 /* Thread creation */
/* POSIX pthread_create: lazily start the manager thread on first use,
   then hand the creation request to it over the request pipe and block
   until the manager reports back via p_retcode/p_retval.
   Returns 0 and stores the new thread id through *thread on success,
   or an errno value (EAGAIN if the manager cannot be started). */
495 int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
496 void * (*start_routine)(void *), void *arg)
498 pthread_descr self = thread_self();
499 struct pthread_request request;
500 if (__pthread_manager_request < 0) {
501 if (__pthread_initialize_manager() < 0) return EAGAIN;
503 request.req_thread = self;
504 request.req_kind = REQ_CREATE;
505 request.req_args.create.attr = attr;
506 request.req_args.create.fn = start_routine;
507 request.req_args.create.arg = arg;
/* Capture the caller's signal mask so the new thread inherits it. */
508 sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
509 &request.req_args.create.mask);
510 PDEBUG("write REQ_CREATE to manager thread\n");
511 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
512 (char *) &request, sizeof(request)));
513 PDEBUG("before suspend(self)\n");
/* NOTE(review): the suspend(self) call that blocks until the manager
   replies sits between these two PDEBUGs in the original; it is not
   visible in this extract. */
515 PDEBUG("after suspend(self)\n");
516 if (THREAD_GETMEM(self, p_retcode) == 0)
517 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
518 return THREAD_GETMEM(self, p_retcode);
521 /* Simple operations on thread identifiers */
/* Return the calling thread's id (the p_tid field of its descriptor). */
523 pthread_t pthread_self(void)
525 pthread_descr self = thread_self();
526 return THREAD_GETMEM(self, p_tid);
/* POSIX pthread_equal: thread ids are plain scalar values in this
   implementation, so two ids name the same thread exactly when they
   compare equal.  Returns non-zero if equal, zero otherwise.
   NOTE(review): braces lost in the extract restored; logic unchanged. */
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
534 /* Helper function for thread_self in the case of user-provided stacks */
/* Locate the calling thread's descriptor by scanning the handle table
   for the entry whose stack range contains the current stack pointer.
   Used when threads run on user-provided stacks and the thread register
   trick is unavailable. */
538 pthread_descr __pthread_find_self()
540 char * sp = CURRENT_STACK_FRAME;
543 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
544 the manager threads handled specially in thread_self(), so start at 2 */
545 h = __pthread_handles + 2;
/* Descriptor sits at the top of the thread's stack, so sp must lie
   between h_bottom and the descriptor address. */
546 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
549 if (h->h_descr == NULL) {
/* NOTE(review): pasting __FUNCTION__ into a string literal is an old
   GNU extension; it does not compile with modern GCC, where
   __FUNCTION__ is a variable, not a literal. */
550 printf("*** "__FUNCTION__" ERROR descriptor is NULL!!!!! ***\n\n");
/* Variant of thread_self() that relies only on the stack pointer —
   safe for a thread that is not yet fully initialized (its thread
   register may still point at the manager).  Checks the manager stack
   first, then scans the handle table from slot 2. */
559 static pthread_descr thread_self_stack(void)
561 char *sp = CURRENT_STACK_FRAME;
564 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
565 return manager_thread;
566 h = __pthread_handles + 2;
568 while (h->h_descr == NULL
569 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
/* NOTE(review): this second scan appears to be an alternate
   preprocessor branch of the loop above (intervening lines are not
   visible in this extract). */
572 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
580 /* Thread scheduling */
/* POSIX pthread_setschedparam: validate the handle under its lock,
   apply the policy/priority to the kernel task via sched_setscheduler,
   cache the effective priority in the descriptor, and if the manager is
   running, let it re-adjust its own priority to stay above all threads.
   Returns 0 on success (implied by the tail of the function) or an
   error code (the returns in the error branches are not visible in
   this extract). */
582 int pthread_setschedparam(pthread_t thread, int policy,
583 const struct sched_param *param)
585 pthread_handle handle = thread_handle(thread);
588 __pthread_lock(&handle->h_lock, NULL);
589 if (invalid_handle(handle, thread)) {
590 __pthread_unlock(&handle->h_lock);
593 th = handle->h_descr;
594 if (sched_setscheduler(th->p_pid, policy, param) == -1) {
595 __pthread_unlock(&handle->h_lock);
/* SCHED_OTHER threads are tracked with priority 0 internally. */
598 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
599 __pthread_unlock(&handle->h_lock);
600 if (__pthread_manager_request >= 0)
601 __pthread_manager_adjust_prio(th->p_priority);
/* POSIX pthread_getschedparam: read the pid under the handle lock, then
   query the kernel outside the lock.  Returns errno values directly on
   sched_* failure; 0 on success (tail not visible in this extract). */
605 int pthread_getschedparam(pthread_t thread, int *policy,
606 struct sched_param *param)
608 pthread_handle handle = thread_handle(thread);
611 __pthread_lock(&handle->h_lock, NULL);
612 if (invalid_handle(handle, thread)) {
613 __pthread_unlock(&handle->h_lock);
616 pid = handle->h_descr->p_pid;
617 __pthread_unlock(&handle->h_lock);
618 pol = sched_getscheduler(pid);
619 if (pol == -1) return errno;
620 if (sched_getparam(pid, param) == -1) return errno;
625 /* Process-wide exit() request */
/* on_exit() hook registered by pthread_initialize(): ask the manager to
   kill all other threads, then (in the main thread) reap the manager so
   its CPU time is accounted to the process. */
627 static void pthread_onexit_process(int retcode, void *arg)
629 struct pthread_request request;
630 pthread_descr self = thread_self();
632 if (__pthread_manager_request >= 0) {
633 request.req_thread = self;
634 request.req_kind = REQ_PROCESS_EXIT;
635 request.req_args.exit.code = retcode;
636 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
637 (char *) &request, sizeof(request)));
639 /* Main thread should accumulate times for thread manager and its
640 children, so that timings for main thread account for all threads. */
641 if (self == __pthread_main_thread) {
642 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
643 /* Since all threads have been asynchronously terminated
644 * (possibly holding locks), free cannot be used any more. */
645 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
650 /* The handler for the RESTART signal just records the signal received
651 in the thread descriptor, and optionally performs a siglongjmp
652 (for pthread_cond_timedwait). */
/* RESTART signal handler: record the signal in the descriptor so the
   suspend loops can see it, and siglongjmp back into the suspend code
   if a jump buffer was armed (pthread_cond_timedwait). */
654 static void pthread_handle_sigrestart(int sig)
656 pthread_descr self = thread_self();
657 THREAD_SETMEM(self, p_signal, sig);
658 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
659 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
662 /* The handler for the CANCEL signal checks for cancellation
663 (in asynchronous mode), for process-wide exit and exec requests.
664 For the thread manager thread, redirect the signal to
665 __pthread_manager_sighandler. */
/* CANCEL signal handler: redirect to the manager's handler when running
   on the manager; honor a process-wide exit request; otherwise act on a
   pending cancellation (async-cancel exits immediately, deferred-cancel
   longjmps to the armed cancellation point). */
667 static void pthread_handle_sigcancel(int sig)
669 pthread_descr self = thread_self();
673 if (self == &__pthread_manager_thread)
676 /* A new thread might get a cancel signal before it is fully
677 initialized, so that the thread register might still point to the
678 manager thread. Double check that this is really the manager
680 pthread_descr real_self = thread_self_stack();
681 if (real_self == &__pthread_manager_thread)
683 __pthread_manager_sighandler(sig);
686 /* Oops, thread_self() isn't working yet.. */
688 # ifdef INIT_THREAD_SELF
689 INIT_THREAD_SELF(self, self->p_nr)
692 __pthread_manager_sighandler(sig);
696 if (__builtin_expect (__pthread_exit_requested, 0)) {
697 /* Main thread should accumulate times for thread manager and its
698 children, so that timings for main thread account for all threads. */
699 if (self == __pthread_main_thread) {
/* NOTE(review): the two waitpid lines below are alternate preprocessor
   branches (pointer vs. struct access); the #if/#else lines are not
   visible in this extract. */
701 waitpid(__pthread_manager_thread->p_pid, NULL, __WCLONE);
703 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
706 _exit(__pthread_exit_code);
708 if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
709 && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
710 if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
711 pthread_exit(PTHREAD_CANCELED);
712 jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
713 if (jmpbuf != NULL) {
714 THREAD_SETMEM(self, p_cancel_jmp, NULL);
715 siglongjmp(*jmpbuf, 1);
720 /* Handler for the DEBUG signal.
721 The debugging strategy is as follows:
722 On reception of a REQ_DEBUG request (sent by new threads created to
723 the thread manager under debugging mode), the thread manager throws
724 __pthread_sig_debug to itself. The debugger (if active) intercepts
725 this signal, takes into account new threads and continue execution
726 of the thread manager by propagating the signal because it doesn't
727 know what it is specifically done for. In the current implementation,
728 the thread manager simply discards it. */
/* DEBUG signal handler: intentionally empty — the signal exists only
   for the debugger to intercept (see the strategy comment above). */
730 static void pthread_handle_sigdebug(int sig)
735 /* Reset the state of the thread machinery after a fork().
736 Close the pipe used for requests and set the main thread to the forked
738 Notice that we can't free the stack segments, as the forked thread
739 may hold pointers into them. */
/* Called after fork() in the child: tear down the manager channel,
   make the forked thread the (sole) main thread, and rewire its
   errno/h_errno pointers to the global variables.  Stacks of other
   threads are deliberately not freed (the child may hold pointers into
   them — see the comment above). */
741 void __pthread_reset_main_thread()
743 pthread_descr self = thread_self();
745 if (__pthread_manager_request != -1) {
746 /* Free the thread manager stack */
747 free(__pthread_manager_thread_bos);
748 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
749 /* Close the two ends of the pipe */
750 __libc_close(__pthread_manager_request);
751 __libc_close(__pthread_manager_reader);
752 __pthread_manager_request = __pthread_manager_reader = -1;
755 /* Update the pid of the main thread */
756 THREAD_SETMEM(self, p_pid, __getpid());
757 /* Make the forked thread the main thread */
758 __pthread_main_thread = self;
/* Make the live-thread list a singleton containing only this thread. */
759 THREAD_SETMEM(self, p_nextlive, self);
760 THREAD_SETMEM(self, p_prevlive, self);
761 /* Now this thread modifies the global variables. */
762 THREAD_SETMEM(self, p_errnop, &_errno);
763 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
766 /* Process-wide exec() request */
/* Pre-exec() helper: kill all other threads via the exit machinery,
   reset this thread to be the main thread, and restore default
   dispositions for the library's private signals so the exec'd image
   does not inherit our handlers.
   NOTE(review): the local struct sigaction sa declaration is not
   visible in this extract. */
768 void __pthread_kill_other_threads_np(void)
771 /* Terminate all other threads and thread manager */
772 pthread_onexit_process(0, NULL);
773 /* Make current thread the main thread in case the calling thread
774 changes its mind, does not exec(), and creates new threads instead. */
775 __pthread_reset_main_thread();
776 /* Reset the signal handlers behaviour for the signals the
777 implementation uses since this would be passed to the new
779 sigemptyset(&sa.sa_mask);
781 sa.sa_handler = SIG_DFL;
782 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
783 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
784 if (__pthread_sig_debug > 0)
785 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
787 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
789 /* Concurrency symbol level. */
790 static int current_level;
/* POSIX pthread_setconcurrency: the hint is recorded but otherwise
   unused (1:1 threading model).  Returns 0 (return not visible in this
   extract). */
792 int __pthread_setconcurrency(int level)
794 /* We don't do anything unless we have found a useful interpretation. */
795 current_level = level;
798 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
/* POSIX pthread_getconcurrency: report the last value stored above. */
800 int __pthread_getconcurrency(void)
802 return current_level;
804 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
807 /* Primitives for controlling thread execution */
/* Block until the restart signal is delivered: atomically unblock only
   __pthread_sig_restart during sigsuspend, looping until the handler
   has recorded that specific signal in p_signal (other signals may wake
   sigsuspend spuriously). */
809 void __pthread_wait_for_restart_signal(pthread_descr self)
813 sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
814 sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
815 THREAD_SETMEM(self, p_signal, 0);
817 sigsuspend(&mask); /* Wait for signal */
818 } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
820 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
823 #ifndef __NR_rt_sigaction
824 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
826 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
827 Since the restart signal does not queue, we use an atomic counter to create
828 queuing semantics. This is needed to resolve a rare race condition in
829 pthread_cond_timedwait_relative. */
/* Pre-RT-signal restart: bump the resume counter; only send the signal
   when the target was actually suspended (counter was negative), giving
   the non-queuing SIGUSR signal queuing semantics. */
831 void __pthread_restart_old(pthread_descr th)
833 if (atomic_increment(&th->p_resume_count) == -1)
834 kill(th->p_pid, __pthread_sig_restart);
/* Pre-RT-signal suspend: decrement the resume counter; only actually
   wait when no restart was already posted (counter went non-positive). */
837 void __pthread_suspend_old(pthread_descr self)
839 if (atomic_decrement(&self->p_resume_count) <= 0)
840 __pthread_wait_for_restart_signal(self);
/* Pre-RT-signal timed suspend: wait until a restart signal arrives or
   abstime passes.  Returns 1 if woken by (or having consumed) a restart
   signal, 0 if it timed out without one — in which case the caller must
   resolve the race (see the long comment below).
   NOTE(review): the "int" return type line and locals (sigjmp_buf
   jmpbuf; struct timeval now;) are not visible in this extract. */
844 __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
846 sigset_t unblock, initial_mask;
847 int was_signalled = 0;
850 if (atomic_decrement(&self->p_resume_count) == 0) {
851 /* Set up a longjmp handler for the restart signal, unblock
852 the signal and sleep. */
854 if (sigsetjmp(jmpbuf, 1) == 0) {
855 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
856 THREAD_SETMEM(self, p_signal, 0);
857 /* Unblock the restart signal */
858 sigemptyset(&unblock);
859 sigaddset(&unblock, __pthread_sig_restart);
860 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
864 struct timespec reltime;
866 /* Compute a time offset relative to now. */
867 __gettimeofday (&now, NULL);
868 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
869 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
870 if (reltime.tv_nsec < 0) {
871 reltime.tv_nsec += 1000000000;
875 /* Sleep for the required duration. If woken by a signal,
876 resume waiting as required by Single Unix Specification. */
877 if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
881 /* Block the restart signal again */
882 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
887 THREAD_SETMEM(self, p_signal_jmp, NULL);
890 /* Now was_signalled is true if we exited the above code
891 due to the delivery of a restart signal. In that case,
892 we know we have been dequeued and resumed and that the
893 resume count is balanced. Otherwise, there are some
894 cases to consider. First, try to bump up the resume count
895 back to zero. If it goes to 1, it means restart() was
896 invoked on this thread. The signal must be consumed
897 and the count bumped down and everything is cool. We
898 can return a 1 to the caller.
899 Otherwise, no restart was delivered yet, so a potential
900 race exists; we return a 0 to the caller which must deal
901 with this race in an appropriate way; for example by
902 atomically removing the thread from consideration for a
903 wakeup---if such a thing fails, it means a restart is
906 if (!was_signalled) {
907 if (atomic_increment(&self->p_resume_count) != -1) {
908 __pthread_wait_for_restart_signal(self);
909 atomic_decrement(&self->p_resume_count); /* should be zero now! */
910 /* woke spontaneously and consumed restart signal */
913 /* woke spontaneously but did not consume restart---caller must resolve */
916 /* woken due to restart signal */
919 #endif /* __NR_rt_sigaction */
922 #ifdef __NR_rt_sigaction
/* RT-signal restart: RT signals queue in the kernel, so simply send the
   restart signal to the target's kernel task. */
923 void __pthread_restart_new(pthread_descr th)
925 /* The barrier is proabably not needed, in which case it still documents
926 our assumptions. The intent is to commit previous writes to shared
927 memory so the woken thread will have a consistent view. Complementary
928 read barriers are present to the suspend functions. */
929 WRITE_MEMORY_BARRIER();
930 kill(th->p_pid, __pthread_sig_restart);
/* RT-signal timed suspend: arm a longjmp target for the restart
   handler, unblock the restart signal, and nanosleep until abstime.
   Returns 1 if a restart signal arrived, 0 on timeout/spurious wakeup
   (caller must resolve the resulting race — see comment below).
   NOTE(review): locals (sigjmp_buf jmpbuf; struct timeval now;) are not
   visible in this extract. */
933 int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
935 sigset_t unblock, initial_mask;
936 int was_signalled = 0;
939 if (sigsetjmp(jmpbuf, 1) == 0) {
940 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
941 THREAD_SETMEM(self, p_signal, 0);
942 /* Unblock the restart signal */
943 sigemptyset(&unblock);
944 sigaddset(&unblock, __pthread_sig_restart);
945 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
949 struct timespec reltime;
951 /* Compute a time offset relative to now. */
952 gettimeofday (&now, NULL);
953 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
954 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
955 if (reltime.tv_nsec < 0) {
956 reltime.tv_nsec += 1000000000;
960 /* Sleep for the required duration. If woken by a signal,
961 resume waiting as required by Single Unix Specification. */
962 if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
966 /* Block the restart signal again */
967 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
972 THREAD_SETMEM(self, p_signal_jmp, NULL);
974 /* Now was_signalled is true if we exited the above code
975 due to the delivery of a restart signal. In that case,
976 everything is cool. We have been removed from whatever
977 we were waiting on by the other thread, and consumed its signal.
979 Otherwise we this thread woke up spontaneously, or due to a signal other
980 than restart. This is an ambiguous case that must be resolved by
981 the caller; the thread is still eligible for a restart wakeup
982 so there is a race. */
984 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
985 return was_signalled;
/* Debug logging helper: prefix each message with the 5-digit pid
   ("%05d : " is exactly 8 characters, matching the buffer + 8 offset),
   format the varargs after it, and write the result to stderr (fd 2).
   NOTE(review): the buffer declaration and va_start/va_end calls are
   not visible in this extract. */
994 void __pthread_message(char * fmt, ...)
998 sprintf(buffer, "%05d : ", __getpid());
1000 vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
1002 TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
1009 /* We need a hook to force the cancelation wrappers to be linked in when
1010 static libpthread is used. */
1011 extern const int __pthread_provide_wrappers;
1012 static const int *const __pthread_require_wrappers =
1013 &__pthread_provide_wrappers;