/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <hp-timing.h>
#include <libc-internal.h>
#include <bits/kernel-features.h>

/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);
/* Nonzero if debugging mode is enabled.  */

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>
__find_in_stack_list (
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
      curp = list_entry (entry, struct pthread, list);

    list_for_each (entry, &__stack_user)
        curp = list_entry (entry, struct pthread, list);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* Deallocate POSIX thread-local-storage.  */
__nptl_deallocate_tsd (void)
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
  if (THREAD_GETMEM (self, specific_used))

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                  void *data = level2[inner].data;

                      /* Always clear the data.  */
                      level2[inner].data = NULL;

                      /* Make sure the data corresponds to a valid
                         key.  This test fails if the key was
                         deallocated and also if it was
                         re-allocated.  It is the user's
                         responsibility to free the memory in this
                      if (level2[inner].seq
                          == __pthread_keys[idx].seq
                          /* It is not necessary to register a destructor
                          && __pthread_keys[idx].destr != NULL)
                        /* Call the user-provided destructor.  */
                        __pthread_keys[idx].destr (data);

              idx += PTHREAD_KEY_1STLEVEL_SIZE;

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */

      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);

          /* The first block is allocated as part of the thread
          THREAD_SETMEM_NC (self, specific, cnt, NULL);

      THREAD_SETMEM (self, specific_used, false);
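
/* Illustrative sketch, not part of this file: the destructor loop above is
   what eventually runs callbacks registered with pthread_key_create when a
   thread exits.  A minimal user-level example, using only the public
   pthread key API:

     static pthread_key_t buf_key;
     static void free_buf (void *p) { free (p); }

     pthread_key_create (&buf_key, free_buf);      (once, in any thread)
     pthread_setspecific (buf_key, malloc (64));   (in each thread using it)

   When such a thread returns or calls pthread_exit, __nptl_deallocate_tsd
   calls free_buf on the stored pointer, repeating up to
   PTHREAD_DESTRUCTOR_ITERATIONS rounds if a destructor installs new
   values.  */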
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
__free_tcb (struct pthread *pd)
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */

      if (__builtin_expect (pd->tpp != NULL, 0))
          struct priority_protection_data *tpp = pd->tpp;

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
start_thread (void *arg)
  struct pthread *pd = (struct pthread *) arg;

  /* Remember the time when the thread was started.  */
  THREAD_SETMEM (pd, cpuclock_offset, now);

#if defined __UCLIBC_HAS_IPV4__ || defined __UCLIBC_HAS_IPV6__
  /* Initialize resolver state pointer.  */

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
      INTERNAL_SYSCALL_DECL (err);
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,

          /* Now call the function to signal the event.  */
          __nptl_death_event ();

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
  __pthread_slist_t *robust = pd->robust_list.__next;

  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
      while (robust != (void *) &pd->robust_head);
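
/* Illustrative sketch, not part of this file: the walk above only has work
   to do for mutexes the application registered as robust.  With the
   GNU-extension names of this code's vintage (the suffix-free POSIX names
   came later) that looks roughly like:

     pthread_mutexattr_t a;
     pthread_mutexattr_init (&a);
     pthread_mutexattr_setrobust_np (&a, PTHREAD_MUTEX_ROBUST_NP);
     pthread_mutex_init (&m, &a);

   If the owning thread dies while holding such a mutex, the next locker
   receives EOWNERDEAD from pthread_mutex_lock and may repair the protected
   state and call pthread_mutex_consistent_np before unlocking.  */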
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);
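
/* Illustrative sketch, not part of this file: because 'clone' was passed
   the CLEARTID flag mentioned above, the kernel stores zero into pd->tid
   and performs a futex wake on that address once this thread is really
   gone.  A joiner can therefore wait for the thread roughly like this
   (pthread_join uses the lll_wait_tid helper for it; expanded here,
   assuming the usual futex-based lowlevellock macros):

     pid_t tid;
     while ((tid = pd->tid) != 0)
       lll_futex_wait (&pd->tid, tid, LLL_SHARED);  */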
/* Default thread attributes for the case when the user does not
static const struct pthread_attr default_attr =
    /* Just some value > 0 which gets rounded to the nearest page size.  */

__pthread_create_2_1 (
     pthread_t *newthread,
     const pthread_attr_t *attr,
     void *(*start_routine) (void *),

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */

  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;
  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;

  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);

  /* Determine scheduling parameters for the thread.  */
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
      /* Something went wrong.  Free the resources.  */
      __deallocate_stack (pd);

weak_alias(__pthread_create_2_1, pthread_create)
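
/* Illustrative usage sketch, not part of this file: callers reach the
   function above through the public pthread_create alias, e.g.:

     #include <pthread.h>

     static void *worker (void *arg) { return arg; }

     int
     main (void)
     {
       pthread_t t;
       void *res;
       if (pthread_create (&t, NULL, worker, (void *) 42L) != 0)
         return 1;
       pthread_join (t, &res);
       return res == (void *) 42L ? 0 : 1;
     }
*/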
/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)

/* The UCLIBC_MUTEX_xxx macros expect to have these as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_init)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_push_defer)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_pop_restore)