/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <bits/kernel-features.h>
/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;
/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>
/* Check whether PD is still on a list of used stacks.  */
static struct pthread *
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* First search the stacks the library allocated itself ...  */
  list_for_each (entry, &stack_used)
    {
      struct pthread *curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  /* ... then the user-provided stacks.  */
  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
/* Deallocate POSIX thread-local-storage.  */
void
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round = 0;
      size_t cnt, idx;

      do
        {
          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);
          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);
              if (level2 != NULL)
                {
                  size_t inner;
                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;
                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a
                                 destructor for the key.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip the whole empty block; it covers
                   PTHREAD_KEY_2NDLEVEL_SIZE key slots.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }
          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto end;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));
      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    end:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
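
/* Aside (illustrative sketch, not part of the original source): the
   destructor loop above is what eventually runs user callbacks
   registered with pthread_key_create.  A minimal user-side example of
   the API it services:  */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t key;

/* Runs inside __nptl_deallocate_tsd when the thread exits.  */
static void destr (void *p)
{
  free (p);
}

static void *worker (void *arg)
{
  pthread_setspecific (key, malloc (16));
  return NULL;                  /* Thread exit triggers destr().  */
}

int main (void)
{
  pthread_t t;
  pthread_key_create (&key, destr);
  pthread_create (&t, NULL, worker, NULL);
  return pthread_join (t, NULL);
}
#endif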
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif
#if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
  /* Initialize resolver state pointer.  */
  __resp = &pd->res;
#endif
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);
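
      /* Aside (illustrative, not part of the original source): this
         unwind buffer anchors the chain used by the
         pthread_cleanup_push/_pop macros and by the unwind performed
         for pthread_exit and cancellation.  A user-side sketch of the
         behavior it enables:

           static void unlock_it (void *m)
           { pthread_mutex_unlock ((pthread_mutex_t *) m); }

           void *worker (void *m)
           {
             pthread_mutex_lock ((pthread_mutex_t *) m);
             pthread_cleanup_push (unlock_it, m);
             pthread_testcancel ();     // may unwind through here
             pthread_cleanup_pop (1);   // run the handler normally
             return NULL;
           }
      */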
      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }
      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
  /* Run the destructors for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);
  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              /* Atomically prepend PD to the global event list; retry
                 if another thread updated __nptl_last_event first.  */
              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }
  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more
     recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
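
/* Aside (illustrative sketch, not part of the original source, and
   assuming the POSIX.1-2008 robust-mutex API): the walk above is the
   userland fallback for what a set_robust_list-aware kernel does when
   a robust mutex owner dies.  The user-side protocol being serviced:  */
#if 0
#include <errno.h>
#include <pthread.h>

pthread_mutex_t m;

void setup (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);
}

void use (void)
{
  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    /* The previous owner died holding the lock (lll_robust_dead above
       is what makes this return value possible); repair the protected
       state, then mark the mutex usable again.  */
    pthread_mutex_consistent (&m);
  /* ... critical section ...  */
  pthread_mutex_unlock (&m);
}
#endif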
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
  char *sp = CURRENT_STACK_FRAME;
#ifdef _STACK_GROWS_DOWN
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
  size_t freesize = ((char *) pd->stackblock - sp) & ~pagesize_m1;
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
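
  /* Aside (illustrative, not part of the original source): with 4 KiB
     pages, pagesize_m1 is 0xfff and `x & ~pagesize_m1` rounds x down
     to a page boundary, e.g. 0x5432 & ~0xfff == 0x5000.  On a
     downward-growing stack this makes freesize the whole-page span
     between the start of the stack block and the current frame, so
     madvise only ever releases pages the exiting thread can no longer
     touch, keeping a PTHREAD_STACK_MIN reserve for the exit path.  */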
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case wait until we have done so.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}
/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
int
__pthread_create_2_1 (
     pthread_t *newthread,
     const pthread_attr_t *attr,
     void *(*start_routine) (void *),
     void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;
  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;
  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;
  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif
  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }
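
  /* Aside (illustrative, not part of the original source): the branch
     above only runs when the caller asked for explicit scheduling,
     e.g.:

       pthread_attr_t a;
       struct sched_param sp = { .sched_priority = 10 };
       pthread_attr_init (&a);
       pthread_attr_setinheritsched (&a, PTHREAD_EXPLICIT_SCHED);
       pthread_attr_setschedpolicy (&a, SCHED_FIFO);
       pthread_attr_setschedparam (&a, &sp);

     The priority 10 is then validated against the
     sched_get_priority_min/max range queried above.  */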
  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (__builtin_expect (err != 0, 0))
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
weak_alias(__pthread_create_2_1, pthread_create)

/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"
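
/* Usage sketch (illustrative only, not part of the library): callers
   reach __pthread_create_2_1 through the pthread_create alias above.
   A detached thread passes ATTR_FLAG_DETACHSTATE via the attribute,
   which made pd->joinid a self-reference earlier in this file.  */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *hello (void *arg)
{
  printf ("hello from the %s thread\n", (const char *) arg);
  return NULL;
}

int main (void)
{
  pthread_t t;
  pthread_attr_t a;

  pthread_create (&t, NULL, hello, "joinable");
  pthread_join (t, NULL);

  pthread_attr_init (&a);
  pthread_attr_setdetachstate (&a, PTHREAD_CREATE_DETACHED);
  pthread_create (&t, &a, hello, "detached");

  /* Returning from main would kill the detached thread; exiting only
     this thread lets it finish.  */
  pthread_exit (NULL);
}
#endif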
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)
/* The UCLIBC_MUTEX_xxx macros expect these to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_init)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_push_defer)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_pop_restore)