1 /* libc-internal interface for mutex locks. NPTL version.
2 Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public License as
7 published by the Free Software Foundation; either version 2.1 of the
8 License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; see the file COPYING.LIB. If
17 not, see <http://www.gnu.org/licenses/>. */
19 #ifndef _BITS_LIBC_LOCK_H
20 #define _BITS_LIBC_LOCK_H 1
22 #include <bits/initspin.h>
28 /* Fortunately Linux now has a means to do locking which is realtime
29 safe without the aid of the thread library. We also need no fancy
30 options like error checking mutexes etc. We only need simple
31 locks, maybe recursive. This can be easily and cheaply implemented
32 using futexes. We will use them everywhere except in ld.so since
33 ld.so might be used on old kernels with a different libc.so. */
35 # include <lowlevellock.h>
37 # include <pthread-functions.h>
41 #if defined _LIBC || defined _IO_MTSAFE_IO
/* NOTE(review): this copy of the header appears to have lines elided
   (the embedded original numbering skips values; several #else/#endif
   lines are missing).  The typedefs below come from what were alternative
   preprocessor branches in the full file -- confirm any single pairing
   against the complete source.  */
42 # if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
/* Branch using the real pthread types.  */
43 typedef pthread_mutex_t __libc_lock_t;
44 typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
/* Branch using a bare futex word; the recursive variant tracks the lock
   word, a recursion count, and the owning thread.  */
46 typedef int __libc_lock_t;
47 typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
49 typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
51 typedef pthread_rwlock_t __libc_rwlock_t;
/* Opaque struct tags for translation units that must not see the
   pthread definitions.  */
53 typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
56 typedef struct __libc_lock_opaque__ __libc_lock_t;
57 typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
58 typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
61 /* Type for key to thread-specific data. */
62 typedef pthread_key_t __libc_key_t;
/* Section-placement attribute for libc's free-resources functions.  */
64 # define __libc_freeres_fn_section \
65 __attribute__ ((section ("__libc_freeres_fn")))
/* Lock-declaration macros: each expands to a plain variable declaration
   of the corresponding lock type with the given storage class.  (No
   further comments are inserted below: the /* opened on the next line is
   left unterminated by an elision in this copy -- NOTE(review).)  */
68 /* Define a lock variable NAME with storage class CLASS. The lock must be
69 initialized with __libc_lock_init before it can be used (or define it
70 with __libc_lock_define_initialized, below). Use `extern' for CLASS to
71 declare a lock defined in another module. In public structure
72 definitions you must use a pointer to the lock structure (i.e., NAME
73 begins with a `*'), because its storage size will not be known outside
75 #define __libc_lock_define(CLASS,NAME) \
76 CLASS __libc_lock_t NAME;
77 #define __libc_rwlock_define(CLASS,NAME) \
78 CLASS __libc_rwlock_t NAME;
79 #define __libc_lock_define_recursive(CLASS,NAME) \
80 CLASS __libc_lock_recursive_t NAME;
81 #define __rtld_lock_define_recursive(CLASS,NAME) \
82 CLASS __rtld_lock_recursive_t NAME;
84 /* Define an initialized lock variable NAME with storage class CLASS.
86 For the C library we take a deeper look at the initializer. For
87 this implementation all fields are initialized to zero. Therefore
88 we don't initialize the variable which allows putting it into the
89 BSS section. (Except on PA-RISC and other odd architectures, where
90 initialized locks must be set to one due to the lack of normal
91 atomic operations.) */
/* Inside libc/libpthread: a zero LLL_LOCK_INITIALIZER lets the lock live
   in .bss with no explicit initializer.  */
93 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
94 # if LLL_LOCK_INITIALIZER == 0
95 # define __libc_lock_define_initialized(CLASS,NAME) \
96 CLASS __libc_lock_t NAME;
98 # define __libc_lock_define_initialized(CLASS,NAME) \
99 CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
/* Otherwise fall back to the pthread initializers.  */
102 # if __LT_SPINLOCK_INIT == 0
103 # define __libc_lock_define_initialized(CLASS,NAME) \
104 CLASS __libc_lock_t NAME;
106 # define __libc_lock_define_initialized(CLASS,NAME) \
107 CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
111 #define __libc_rwlock_define_initialized(CLASS,NAME) \
112 CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
/* Recursive variants.  (The /* opened on the next line is unterminated in
   this copy; its close was elided -- NOTE(review).)  */
114 /* Define an initialized recursive lock variable NAME with storage
116 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
117 # if LLL_LOCK_INITIALIZER == 0
118 # define __libc_lock_define_initialized_recursive(CLASS,NAME) \
119 CLASS __libc_lock_recursive_t NAME;
121 # define __libc_lock_define_initialized_recursive(CLASS,NAME) \
122 CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
124 # define _LIBC_LOCK_RECURSIVE_INITIALIZER \
125 { LLL_LOCK_INITIALIZER, 0, NULL }
127 # define __libc_lock_define_initialized_recursive(CLASS,NAME) \
128 CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
129 # define _LIBC_LOCK_RECURSIVE_INITIALIZER \
130 {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
133 #define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
134 CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
135 #define _RTLD_LOCK_RECURSIVE_INITIALIZER \
136 {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
138 #define __rtld_lock_initialize(NAME) \
139 (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
141 /* If we check for a weakly referenced symbol and then perform a
142 normal jump to it the code generated for some platforms in case of
143 PIC is unnecessarily slow. What would happen is that the function
144 is first referenced as data and then it is called indirectly
145 through the PLT. We can make this a direct jump. */
/* GNU C: capture the function address once in a typed temporary, call
   through it if non-NULL, else yield ELSE.  */
147 # define __libc_maybe_call(FUNC, ARGS, ELSE) \
148 (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
149 _fn != NULL ? (*_fn) ARGS : ELSE; }))
/* Fallback without statement expressions; note FUNC is evaluated twice.  */
151 # define __libc_maybe_call(FUNC, ARGS, ELSE) \
152 (FUNC != NULL ? FUNC ARGS : ELSE)
155 /* Call thread functions through the function pointer table. */
/* Shared libc: route through the __libc_pthread_functions table
   (PTHFCT_CALL); availability is signalled by
   __libc_pthread_functions_init.  */
156 #if defined SHARED && !defined NOT_IN_libc
157 # define PTFAVAIL(NAME) __libc_pthread_functions_init
158 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
159 (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
160 # define __libc_ptf_call_always(FUNC, ARGS) \
161 PTHFCT_CALL (ptr_##FUNC, ARGS)
/* Other builds: test/call the weak symbol directly.  */
163 # define PTFAVAIL(NAME) (NAME != NULL)
164 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
165 __libc_maybe_call (FUNC, ARGS, ELSE)
166 # define __libc_ptf_call_always(FUNC, ARGS) \
171 /* Initialize the named lock variable, leaving it in a consistent, unlocked
173 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
174 # define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
176 # define __libc_lock_init(NAME) \
177 __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
179 #if defined SHARED && !defined NOT_IN_libc
180 /* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
182 # define __libc_rwlock_init(NAME) \
183 (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
185 # define __libc_rwlock_init(NAME) \
186 __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
189 /* Same as last but this time we initialize a recursive mutex. */
/* Inside libc/libpthread: struct assignment from the compound-literal
   initializer; the expression yields 0 like pthread_mutex_init.  */
190 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
191 # define __libc_lock_init_recursive(NAME) \
192 ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
/* Otherwise build a PTHREAD_MUTEX_RECURSIVE_NP mutex via a temporary
   attribute object, guarded on __pthread_mutex_init being non-NULL.
   (The do/while wrappers of these two macros are on lines elided from
   this copy -- NOTE(review).)  */
194 # define __libc_lock_init_recursive(NAME) \
196 if (__pthread_mutex_init != NULL) \
198 pthread_mutexattr_t __attr; \
199 __pthread_mutexattr_init (&__attr); \
200 __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
201 __pthread_mutex_init (&(NAME).mutex, &__attr); \
202 __pthread_mutexattr_destroy (&__attr); \
207 #define __rtld_lock_init_recursive(NAME) \
209 if (__pthread_mutex_init != NULL) \
211 pthread_mutexattr_t __attr; \
212 __pthread_mutexattr_init (&__attr); \
213 __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
214 __pthread_mutex_init (&(NAME).mutex, &__attr); \
215 __pthread_mutexattr_destroy (&__attr); \
219 /* Finalize the named lock variable, which must be locked. It cannot be
220 used again until __libc_lock_init is called again on it. This must be
221 called on a lock variable before the containing storage is reused. */
/* Futex-based locks need no destruction, so these are no-ops inside
   libc/libpthread; otherwise defer to the pthread destroy functions.  */
222 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
223 # define __libc_lock_fini(NAME) ((void) 0)
225 # define __libc_lock_fini(NAME) \
226 __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
228 #if defined SHARED && !defined NOT_IN_libc
229 # define __libc_rwlock_fini(NAME) ((void) 0)
231 # define __libc_rwlock_fini(NAME) \
232 __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
235 /* Finalize recursive named lock. */
236 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
237 # define __libc_lock_fini_recursive(NAME) ((void) 0)
239 # define __libc_lock_fini_recursive(NAME) \
240 __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
243 /* Lock the named lock variable. */
/* Inside libc/libpthread: private futex lock; the statement expression
   yields 0 so callers see a pthread_mutex_lock-style result.  */
244 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
245 # define __libc_lock_lock(NAME) \
246 ({ lll_lock (NAME, LLL_PRIVATE); 0; })
248 # define __libc_lock_lock(NAME) \
249 __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
/* rwlocks always go through the pointer table / weak symbols.  */
251 #define __libc_rwlock_rdlock(NAME) \
252 __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
253 #define __libc_rwlock_wrlock(NAME) \
254 __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
256 /* Lock the recursive named lock variable. */
/* Hand-rolled recursive lock: take the futex only on first acquisition
   by this thread, then record ownership.  (The recursion-count update is
   on lines elided from this copy -- NOTE(review).)  */
257 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
258 # define __libc_lock_lock_recursive(NAME) \
260 void *self = THREAD_SELF; \
261 if ((NAME).owner != self) \
263 lll_lock ((NAME).lock, LLL_PRIVATE); \
264 (NAME).owner = self; \
269 # define __libc_lock_lock_recursive(NAME) \
270 __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
273 /* Try to lock the named lock variable. */
274 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
275 # define __libc_lock_trylock(NAME) \
278 # define __libc_lock_trylock(NAME) \
279 __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
281 #define __libc_rwlock_tryrdlock(NAME) \
282 __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
283 #define __libc_rwlock_trywrlock(NAME) \
284 __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
286 /* Try to lock the recursive named lock variable. */
287 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
288 # define __libc_lock_trylock_recursive(NAME) \
291 void *self = THREAD_SELF; \
292 if ((NAME).owner != self) \
294 if (lll_trylock ((NAME).lock) == 0) \
296 (NAME).owner = self; \
307 # define __libc_lock_trylock_recursive(NAME) \
308 __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
/* rtld variant always uses the pthread mutex embedded in the struct.  */
311 #define __rtld_lock_trylock_recursive(NAME) \
312 __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
314 /* Unlock the named lock variable. */
315 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
316 # define __libc_lock_unlock(NAME) \
317 lll_unlock (NAME, LLL_PRIVATE)
319 # define __libc_lock_unlock(NAME) \
320 __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
322 #define __libc_rwlock_unlock(NAME) \
323 __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
325 /* Unlock the recursive named lock variable. */
326 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
327 /* We do no error checking here. */
/* Release the futex only when the recursion count reaches zero; the
   owner field is cleared before the futex wake.  */
328 # define __libc_lock_unlock_recursive(NAME) \
330 if (--(NAME).cnt == 0) \
332 (NAME).owner = NULL; \
333 lll_unlock ((NAME).lock, LLL_PRIVATE); \
337 # define __libc_lock_unlock_recursive(NAME) \
338 __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
/* Shared libc: ld.so locking is routed through the GL(dl_rtld_*) hooks;
   the defaults only adjust the mutex recursion count directly --
   presumably the single-threaded case before the hooks are replaced.
   NOTE(review): confirm against _rtld_global's definition.  */
341 #if defined _LIBC && defined SHARED
342 # define __rtld_lock_default_lock_recursive(lock) \
343 ++((pthread_mutex_t *)(lock))->__data.__count;
345 # define __rtld_lock_default_unlock_recursive(lock) \
346 --((pthread_mutex_t *)(lock))->__data.__count;
348 # define __rtld_lock_lock_recursive(NAME) \
349 GL(dl_rtld_lock_recursive) (&(NAME).mutex)
351 # define __rtld_lock_unlock_recursive(NAME) \
352 GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
354 # define __rtld_lock_lock_recursive(NAME) \
355 __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
357 # define __rtld_lock_unlock_recursive(NAME) \
358 __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
/* Once-control: when PTHREAD_ONCE_INIT is 0 the variable can stay
   uninitialized in .bss; otherwise it needs the explicit initializer.
   The __libc_once body below is truncated by elided lines in this copy
   (its fallback path sets bit 2 as the done marker) -- NOTE(review).  */
361 /* Define once control variable. */
362 #if PTHREAD_ONCE_INIT == 0
363 /* Special case for static variables where we can avoid the initialization
365 # define __libc_once_define(CLASS, NAME) \
366 CLASS pthread_once_t NAME
368 # define __libc_once_define(CLASS, NAME) \
369 CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
372 /* Call handler iff the first call. */
373 #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
375 if (PTFAVAIL (__pthread_once)) \
376 __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
378 else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
380 (ONCE_CONTROL) |= 2; \
385 /* Note that for I/O cleanup handling we are using the old-style
386 cancel handling. It does not have to be integrated with C++ since
387 no C++ code is called in the middle. The old-style handling is
388 faster and the support is not going away. */
/* Old-style cleanup-buffer primitives from libpthread.  Some parameter
   lists here are truncated by elided lines in this copy -- NOTE(review).  */
389 extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
390 void (*routine) (void *), void *arg);
391 extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
393 extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
394 void (*routine) (void *), void *arg);
395 extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
/* The three region macros share the local _buffer/_avail state declared
   in __libc_cleanup_region_start; each macro body below is missing
   continuation lines in this copy -- NOTE(review).  */
398 /* Start critical region with cleanup. */
399 #define __libc_cleanup_region_start(DOIT, FCT, ARG) \
400 { struct _pthread_cleanup_buffer _buffer; \
403 _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
405 __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
408 _buffer.__routine = (FCT); \
409 _buffer.__arg = (ARG); \
415 /* End critical region with cleanup. */
416 #define __libc_cleanup_region_end(DOIT) \
418 __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
420 _buffer.__routine (_buffer.__arg); \
423 /* Sometimes we have to exit the block in the middle. */
424 #define __libc_cleanup_end(DOIT) \
426 __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
428 _buffer.__routine (_buffer.__arg)
/* Runs the frame's recorded cancel routine; used as the __cleanup__
   handler in __libc_cleanup_push below.  (The function's header lines
   are elided in this copy -- NOTE(review).)  */
431 /* Normal cleanup handling, based on C cleanup attribute. */
433 __libc_cleanup_routine (struct __pthread_cleanup_frame *f)
436 f->__cancel_routine (f->__cancel_arg);
439 #define __libc_cleanup_push(fct, arg) \
441 struct __pthread_cleanup_frame __clframe \
442 __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
443 = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
446 #define __libc_cleanup_pop(execute) \
447 __clframe.__do_it = (execute); \
451 /* Create thread-specific key. */
452 #define __libc_key_create(KEY, DESTRUCTOR) \
453 __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
/* Without libpthread, key creation above yields 1 (failure); the
   accessors below fall back to NULL / 0 respectively.  */
455 /* Get thread-specific data. */
456 #define __libc_getspecific(KEY) \
457 __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
459 /* Set thread-specific data. */
460 #define __libc_setspecific(KEY, VALUE) \
461 __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
463 /* Register handlers to execute before and after `fork'. Note that the
464 last parameter is NULL. The handlers registered by the libc are
465 never removed so this is OK. */
/* NOTE(review): the fourth parameter of __register_atfork is on an
   elided line -- presumably the registering DSO's handle; confirm
   against the full file.  */
466 #define __libc_atfork(PREPARE, PARENT, CHILD) \
467 __register_atfork (PREPARE, PARENT, CHILD, NULL)
468 extern int __register_atfork (void (*__prepare) (void),
469 void (*__parent) (void),
470 void (*__child) (void),
/* Prototypes for the internal pthread entry points used by the macros
   above; they are declared weak further down in this header so a libc
   without libpthread sees them as NULL.  (No comments are inserted
   below: the /* opened on the next line is left unterminated by an
   elision in this copy -- NOTE(review).)  */
473 /* Functions that are used by this file and are internal to the GNU C
476 extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
477 __const pthread_mutexattr_t *__mutex_attr);
479 extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
481 extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
483 extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
485 extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
487 extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
489 extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
491 extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
495 extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
496 __const pthread_rwlockattr_t *__attr);
498 extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
500 extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
502 extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
504 extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
506 extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
508 extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
511 extern int __pthread_key_create (pthread_key_t *__key,
512 void (*__destr_function) (void *));
514 extern int __pthread_setspecific (pthread_key_t __key,
515 __const void *__pointer);
517 extern void *__pthread_getspecific (pthread_key_t __key);
519 extern int __pthread_once (pthread_once_t *__once_control,
520 void (*__init_routine) (void));
522 extern int __pthread_atfork (void (*__prepare) (void),
523 void (*__parent) (void),
524 void (*__child) (void));
528 /* Make the pthread functions weak so that we can elide them from
529 single-threaded processes. */
/* Two spellings, presumably selected by an elided conditional: the glibc
   weak_extern macro, or plain #pragma weak -- NOTE(review).  */
530 #ifndef __NO_WEAK_PTHREAD_ALIASES
532 weak_extern (__pthread_mutex_init)
533 weak_extern (__pthread_mutex_destroy)
534 weak_extern (__pthread_mutex_lock)
535 weak_extern (__pthread_mutex_trylock)
536 weak_extern (__pthread_mutex_unlock)
537 weak_extern (__pthread_mutexattr_init)
538 weak_extern (__pthread_mutexattr_destroy)
539 weak_extern (__pthread_mutexattr_settype)
540 weak_extern (__pthread_rwlock_init)
541 weak_extern (__pthread_rwlock_destroy)
542 weak_extern (__pthread_rwlock_rdlock)
543 weak_extern (__pthread_rwlock_tryrdlock)
544 weak_extern (__pthread_rwlock_wrlock)
545 weak_extern (__pthread_rwlock_trywrlock)
546 weak_extern (__pthread_rwlock_unlock)
547 weak_extern (__pthread_key_create)
548 weak_extern (__pthread_setspecific)
549 weak_extern (__pthread_getspecific)
550 weak_extern (__pthread_once)
551 //weak_extern (__pthread_initialize)
552 weak_extern (__pthread_atfork)
553 weak_extern (_pthread_cleanup_push_defer)
554 weak_extern (_pthread_cleanup_pop_restore)
555 weak_extern (pthread_setcancelstate)
/* #pragma weak fallback.  NOTE(review): this list lacks
   __pthread_rwlock_init, which the weak_extern list above does have --
   verify against the full file whether that line was elided or is a
   genuine omission.  */
557 # pragma weak __pthread_mutex_init
558 # pragma weak __pthread_mutex_destroy
559 # pragma weak __pthread_mutex_lock
560 # pragma weak __pthread_mutex_trylock
561 # pragma weak __pthread_mutex_unlock
562 # pragma weak __pthread_mutexattr_init
563 # pragma weak __pthread_mutexattr_destroy
564 # pragma weak __pthread_mutexattr_settype
565 # pragma weak __pthread_rwlock_destroy
566 # pragma weak __pthread_rwlock_rdlock
567 # pragma weak __pthread_rwlock_tryrdlock
568 # pragma weak __pthread_rwlock_wrlock
569 # pragma weak __pthread_rwlock_trywrlock
570 # pragma weak __pthread_rwlock_unlock
571 # pragma weak __pthread_key_create
572 # pragma weak __pthread_setspecific
573 # pragma weak __pthread_getspecific
574 # pragma weak __pthread_once
575 //# pragma weak __pthread_initialize
576 # pragma weak __pthread_atfork
577 # pragma weak _pthread_cleanup_push_defer
578 # pragma weak _pthread_cleanup_pop_restore
579 # pragma weak pthread_setcancelstate
583 #endif /* bits/libc-lock.h */