OSDN Git Service

Replace FSF snail mail address with URLs
[uclinux-h8/uClibc.git] / libpthread / nptl / sysdeps / pthread / bits / libc-lock.h
1 /* libc-internal interface for mutex locks.  NPTL version.
2    Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
3    This file is part of the GNU C Library.
4
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public License as
7    published by the Free Software Foundation; either version 2.1 of the
8    License, or (at your option) any later version.
9
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; see the file COPYING.LIB.  If
17    not, see <http://www.gnu.org/licenses/>.  */
18
19 #ifndef _BITS_LIBC_LOCK_H
20 #define _BITS_LIBC_LOCK_H 1
21
22 #include <bits/initspin.h>
23 #include <pthread.h>
24 #define __need_NULL
25 #include <stddef.h>
26
27
28 /* Fortunately Linux now has a means to do locking which is realtime
29    safe without the aid of the thread library.  We also need no fancy
30    options like error checking mutexes etc.  We only need simple
31    locks, maybe recursive.  This can be easily and cheaply implemented
32    using futexes.  We will use them everywhere except in ld.so since
33    ld.so might be used on old kernels with a different libc.so.  */
34 #ifdef _LIBC
35 # include <lowlevellock.h>
36 # include <tls.h>
37 # include <pthread-functions.h>
38 #endif
39
40 /* Mutex type.  */
41 #if defined _LIBC || defined _IO_MTSAFE_IO
42 # if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
43 typedef pthread_mutex_t __libc_lock_t;
44 typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
45 # else
46 typedef int __libc_lock_t;
47 typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
48 # endif
49 typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
50 # ifdef __USE_UNIX98
51 typedef pthread_rwlock_t __libc_rwlock_t;
52 # else
53 typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
54 # endif
55 #else
56 typedef struct __libc_lock_opaque__ __libc_lock_t;
57 typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
58 typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
59 #endif
60
61 /* Type for key to thread-specific data.  */
62 typedef pthread_key_t __libc_key_t;
63
64 # define __libc_freeres_fn_section \
65       __attribute__ ((section ("__libc_freeres_fn")))
66
67
68 /* Define a lock variable NAME with storage class CLASS.  The lock must be
69    initialized with __libc_lock_init before it can be used (or define it
70    with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
71    declare a lock defined in another module.  In public structure
72    definitions you must use a pointer to the lock structure (i.e., NAME
73    begins with a `*'), because its storage size will not be known outside
74    of libc.  */
75 #define __libc_lock_define(CLASS,NAME) \
76   CLASS __libc_lock_t NAME;
77 #define __libc_rwlock_define(CLASS,NAME) \
78   CLASS __libc_rwlock_t NAME;
79 #define __libc_lock_define_recursive(CLASS,NAME) \
80   CLASS __libc_lock_recursive_t NAME;
81 #define __rtld_lock_define_recursive(CLASS,NAME) \
82   CLASS __rtld_lock_recursive_t NAME;
83
84 /* Define an initialized lock variable NAME with storage class CLASS.
85
86    For the C library we take a deeper look at the initializer.  For
87    this implementation all fields are initialized to zero.  Therefore
88    we don't initialize the variable which allows putting it into the
89    BSS section.  (Except on PA-RISC and other odd architectures, where
90    initialized locks must be set to one due to the lack of normal
91    atomic operations.) */
92
93 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
94 # if LLL_LOCK_INITIALIZER == 0
95 #  define __libc_lock_define_initialized(CLASS,NAME) \
96   CLASS __libc_lock_t NAME;
97 # else
98 #  define __libc_lock_define_initialized(CLASS,NAME) \
99   CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
100 # endif
101 #else
102 # if __LT_SPINLOCK_INIT == 0
103 #  define __libc_lock_define_initialized(CLASS,NAME) \
104   CLASS __libc_lock_t NAME;
105 # else
106 #  define __libc_lock_define_initialized(CLASS,NAME) \
107   CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
108 # endif
109 #endif
110
111 #define __libc_rwlock_define_initialized(CLASS,NAME) \
112   CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
113
114 /* Define an initialized recursive lock variable NAME with storage
115    class CLASS.  */
116 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
117 # if LLL_LOCK_INITIALIZER == 0
118 #  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
119   CLASS __libc_lock_recursive_t NAME;
120 # else
121 #  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
122   CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
123 # endif
124 # define _LIBC_LOCK_RECURSIVE_INITIALIZER \
125   { LLL_LOCK_INITIALIZER, 0, NULL }
126 #else
127 # define __libc_lock_define_initialized_recursive(CLASS,NAME) \
128   CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
129 # define _LIBC_LOCK_RECURSIVE_INITIALIZER \
130   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
131 #endif
132
133 #define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
134   CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
135 #define _RTLD_LOCK_RECURSIVE_INITIALIZER \
136   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
137
138 #define __rtld_lock_initialize(NAME) \
139   (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
140
141 /* If we check for a weakly referenced symbol and then perform a
142    normal jump to it te code generated for some platforms in case of
143    PIC is unnecessarily slow.  What would happen is that the function
144    is first referenced as data and then it is called indirectly
145    through the PLT.  We can make this a direct jump.  */
146 #ifdef __PIC__
147 # define __libc_maybe_call(FUNC, ARGS, ELSE) \
148   (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
149                     _fn != NULL ? (*_fn) ARGS : ELSE; }))
150 #else
151 # define __libc_maybe_call(FUNC, ARGS, ELSE) \
152   (FUNC != NULL ? FUNC ARGS : ELSE)
153 #endif
154
155 /* Call thread functions through the function pointer table.  */
156 #if defined SHARED && !defined NOT_IN_libc
157 # define PTFAVAIL(NAME) __libc_pthread_functions_init
158 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
159   (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
160 # define __libc_ptf_call_always(FUNC, ARGS) \
161   PTHFCT_CALL (ptr_##FUNC, ARGS)
162 #else
163 # define PTFAVAIL(NAME) (NAME != NULL)
164 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
165   __libc_maybe_call (FUNC, ARGS, ELSE)
166 # define __libc_ptf_call_always(FUNC, ARGS) \
167   FUNC ARGS
168 #endif
169
170
171 /* Initialize the named lock variable, leaving it in a consistent, unlocked
172    state.  */
173 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
174 # define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
175 #else
176 # define __libc_lock_init(NAME) \
177   __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
178 #endif
179 #if defined SHARED && !defined NOT_IN_libc
180 /* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
181    inefficient.  */
182 # define __libc_rwlock_init(NAME) \
183   (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
184 #else
185 # define __libc_rwlock_init(NAME) \
186   __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
187 #endif
188
189 /* Same as last but this time we initialize a recursive mutex.  */
190 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
191 # define __libc_lock_init_recursive(NAME) \
192   ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
193 #else
194 # define __libc_lock_init_recursive(NAME) \
195   do {                                                                        \
196     if (__pthread_mutex_init != NULL)                                         \
197       {                                                                       \
198         pthread_mutexattr_t __attr;                                           \
199         __pthread_mutexattr_init (&__attr);                                   \
200         __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
201         __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
202         __pthread_mutexattr_destroy (&__attr);                                \
203       }                                                                       \
204   } while (0)
205 #endif
206
207 #define __rtld_lock_init_recursive(NAME) \
208   do {                                                                        \
209     if (__pthread_mutex_init != NULL)                                         \
210       {                                                                       \
211         pthread_mutexattr_t __attr;                                           \
212         __pthread_mutexattr_init (&__attr);                                   \
213         __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
214         __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
215         __pthread_mutexattr_destroy (&__attr);                                \
216       }                                                                       \
217   } while (0)
218
219 /* Finalize the named lock variable, which must be locked.  It cannot be
220    used again until __libc_lock_init is called again on it.  This must be
221    called on a lock variable before the containing storage is reused.  */
222 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
223 # define __libc_lock_fini(NAME) ((void) 0)
224 #else
225 # define __libc_lock_fini(NAME) \
226   __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
227 #endif
228 #if defined SHARED && !defined NOT_IN_libc
229 # define __libc_rwlock_fini(NAME) ((void) 0)
230 #else
231 # define __libc_rwlock_fini(NAME) \
232   __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
233 #endif
234
235 /* Finalize recursive named lock.  */
236 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
237 # define __libc_lock_fini_recursive(NAME) ((void) 0)
238 #else
239 # define __libc_lock_fini_recursive(NAME) \
240   __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
241 #endif
242
243 /* Lock the named lock variable.  */
244 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
245 # define __libc_lock_lock(NAME) \
246   ({ lll_lock (NAME, LLL_PRIVATE); 0; })
247 #else
248 # define __libc_lock_lock(NAME) \
249   __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
250 #endif
251 #define __libc_rwlock_rdlock(NAME) \
252   __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
253 #define __libc_rwlock_wrlock(NAME) \
254   __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
255
256 /* Lock the recursive named lock variable.  */
257 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
258 # define __libc_lock_lock_recursive(NAME) \
259   do {                                                                        \
260     void *self = THREAD_SELF;                                                 \
261     if ((NAME).owner != self)                                                 \
262       {                                                                       \
263         lll_lock ((NAME).lock, LLL_PRIVATE);                                  \
264         (NAME).owner = self;                                                  \
265       }                                                                       \
266     ++(NAME).cnt;                                                             \
267   } while (0)
268 #else
269 # define __libc_lock_lock_recursive(NAME) \
270   __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
271 #endif
272
273 /* Try to lock the named lock variable.  */
274 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
275 # define __libc_lock_trylock(NAME) \
276   lll_trylock (NAME)
277 #else
278 # define __libc_lock_trylock(NAME) \
279   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
280 #endif
281 #define __libc_rwlock_tryrdlock(NAME) \
282   __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
283 #define __libc_rwlock_trywrlock(NAME) \
284   __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
285
286 /* Try to lock the recursive named lock variable.  */
287 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
288 # define __libc_lock_trylock_recursive(NAME) \
289   ({                                                                          \
290     int result = 0;                                                           \
291     void *self = THREAD_SELF;                                                 \
292     if ((NAME).owner != self)                                                 \
293       {                                                                       \
294         if (lll_trylock ((NAME).lock) == 0)                                   \
295           {                                                                   \
296             (NAME).owner = self;                                              \
297             (NAME).cnt = 1;                                                   \
298           }                                                                   \
299         else                                                                  \
300           result = EBUSY;                                                     \
301       }                                                                       \
302     else                                                                      \
303       ++(NAME).cnt;                                                           \
304     result;                                                                   \
305   })
306 #else
307 # define __libc_lock_trylock_recursive(NAME) \
308   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
309 #endif
310
311 #define __rtld_lock_trylock_recursive(NAME) \
312   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
313
314 /* Unlock the named lock variable.  */
315 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
316 # define __libc_lock_unlock(NAME) \
317   lll_unlock (NAME, LLL_PRIVATE)
318 #else
319 # define __libc_lock_unlock(NAME) \
320   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
321 #endif
322 #define __libc_rwlock_unlock(NAME) \
323   __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
324
325 /* Unlock the recursive named lock variable.  */
326 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
327 /* We do no error checking here.  */
328 # define __libc_lock_unlock_recursive(NAME) \
329   do {                                                                        \
330     if (--(NAME).cnt == 0)                                                    \
331       {                                                                       \
332         (NAME).owner = NULL;                                                  \
333         lll_unlock ((NAME).lock, LLL_PRIVATE);                                \
334       }                                                                       \
335   } while (0)
336 #else
337 # define __libc_lock_unlock_recursive(NAME) \
338   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
339 #endif
340
341 #if defined _LIBC && defined SHARED
342 # define __rtld_lock_default_lock_recursive(lock) \
343   ++((pthread_mutex_t *)(lock))->__data.__count;
344
345 # define __rtld_lock_default_unlock_recursive(lock) \
346   --((pthread_mutex_t *)(lock))->__data.__count;
347
348 # define __rtld_lock_lock_recursive(NAME) \
349   GL(dl_rtld_lock_recursive) (&(NAME).mutex)
350
351 # define __rtld_lock_unlock_recursive(NAME) \
352   GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
353 #else
354 # define __rtld_lock_lock_recursive(NAME) \
355   __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
356
357 # define __rtld_lock_unlock_recursive(NAME) \
358   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
359 #endif
360
361 /* Define once control variable.  */
362 #if PTHREAD_ONCE_INIT == 0
363 /* Special case for static variables where we can avoid the initialization
364    if it is zero.  */
365 # define __libc_once_define(CLASS, NAME) \
366   CLASS pthread_once_t NAME
367 #else
368 # define __libc_once_define(CLASS, NAME) \
369   CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
370 #endif
371
372 /* Call handler iff the first call.  */
373 #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
374   do {                                                                        \
375     if (PTFAVAIL (__pthread_once))                                            \
376       __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),               \
377                                                INIT_FUNCTION));               \
378     else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                           \
379       INIT_FUNCTION ();                                                       \
380       (ONCE_CONTROL) |= 2;                                                    \
381     }                                                                         \
382   } while (0)
383
384
385 /* Note that for I/O cleanup handling we are using the old-style
386    cancel handling.  It does not have to be integrated with C++ snce
387    no C++ code is called in the middle.  The old-style handling is
388    faster and the support is not going away.  */
389 extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
390                                    void (*routine) (void *), void *arg);
391 extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
392                                   int execute);
393 extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
394                                          void (*routine) (void *), void *arg);
395 extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
396                                           int execute);
397
398 /* Start critical region with cleanup.  */
399 #define __libc_cleanup_region_start(DOIT, FCT, ARG) \
400   { struct _pthread_cleanup_buffer _buffer;                                   \
401     int _avail;                                                               \
402     if (DOIT) {                                                               \
403       _avail = PTFAVAIL (_pthread_cleanup_push_defer);                        \
404       if (_avail) {                                                           \
405         __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT,  \
406                                                               ARG));          \
407       } else {                                                                \
408         _buffer.__routine = (FCT);                                            \
409         _buffer.__arg = (ARG);                                                \
410       }                                                                       \
411     } else {                                                                  \
412       _avail = 0;                                                             \
413     }
414
415 /* End critical region with cleanup.  */
416 #define __libc_cleanup_region_end(DOIT) \
417     if (_avail) {                                                             \
418       __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
419     } else if (DOIT)                                                          \
420       _buffer.__routine (_buffer.__arg);                                      \
421   }
422
423 /* Sometimes we have to exit the block in the middle.  */
424 #define __libc_cleanup_end(DOIT) \
425     if (_avail) {                                                             \
426       __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
427     } else if (DOIT)                                                          \
428       _buffer.__routine (_buffer.__arg)
429
430
431 /* Normal cleanup handling, based on C cleanup attribute.  */
432 static inline void
433 __libc_cleanup_routine (struct __pthread_cleanup_frame *f)
434 {
435   if (f->__do_it)
436     f->__cancel_routine (f->__cancel_arg);
437 }
438
439 #define __libc_cleanup_push(fct, arg) \
440   do {                                                                        \
441     struct __pthread_cleanup_frame __clframe                                  \
442       __attribute__ ((__cleanup__ (__libc_cleanup_routine)))                  \
443       = { .__cancel_routine = (fct), .__cancel_arg = (arg),                   \
444           .__do_it = 1 };
445
446 #define __libc_cleanup_pop(execute) \
447     __clframe.__do_it = (execute);                                            \
448   } while (0)
449
450
451 /* Create thread-specific key.  */
452 #define __libc_key_create(KEY, DESTRUCTOR) \
453   __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
454
455 /* Get thread-specific data.  */
456 #define __libc_getspecific(KEY) \
457   __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
458
459 /* Set thread-specific data.  */
460 #define __libc_setspecific(KEY, VALUE) \
461   __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
462
463 /* Register handlers to execute before and after `fork'.  Note that the
464    last parameter is NULL.  The handlers registered by the libc are
465    never removed so this is OK.  */
466 #define __libc_atfork(PREPARE, PARENT, CHILD) \
467   __register_atfork (PREPARE, PARENT, CHILD, NULL)
468 extern int __register_atfork (void (*__prepare) (void),
469                               void (*__parent) (void),
470                               void (*__child) (void),
471                               void *__dso_handle);
472
473 /* Functions that are used by this file and are internal to the GNU C
474    library.  */
475
476 extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
477                                  __const pthread_mutexattr_t *__mutex_attr);
478
479 extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
480
481 extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
482
483 extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
484
485 extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
486
487 extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
488
489 extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
490
491 extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
492                                         int __kind);
493
494 #ifdef __USE_UNIX98
495 extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
496                                   __const pthread_rwlockattr_t *__attr);
497
498 extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
499
500 extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
501
502 extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
503
504 extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
505
506 extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
507
508 extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
509 #endif
510
511 extern int __pthread_key_create (pthread_key_t *__key,
512                                  void (*__destr_function) (void *));
513
514 extern int __pthread_setspecific (pthread_key_t __key,
515                                   __const void *__pointer);
516
517 extern void *__pthread_getspecific (pthread_key_t __key);
518
519 extern int __pthread_once (pthread_once_t *__once_control,
520                            void (*__init_routine) (void));
521
522 extern int __pthread_atfork (void (*__prepare) (void),
523                              void (*__parent) (void),
524                              void (*__child) (void));
525
526
527
528 /* Make the pthread functions weak so that we can elide them from
529    single-threaded processes.  */
530 #ifndef __NO_WEAK_PTHREAD_ALIASES
531 # ifdef weak_extern
532 weak_extern (__pthread_mutex_init)
533 weak_extern (__pthread_mutex_destroy)
534 weak_extern (__pthread_mutex_lock)
535 weak_extern (__pthread_mutex_trylock)
536 weak_extern (__pthread_mutex_unlock)
537 weak_extern (__pthread_mutexattr_init)
538 weak_extern (__pthread_mutexattr_destroy)
539 weak_extern (__pthread_mutexattr_settype)
540 weak_extern (__pthread_rwlock_init)
541 weak_extern (__pthread_rwlock_destroy)
542 weak_extern (__pthread_rwlock_rdlock)
543 weak_extern (__pthread_rwlock_tryrdlock)
544 weak_extern (__pthread_rwlock_wrlock)
545 weak_extern (__pthread_rwlock_trywrlock)
546 weak_extern (__pthread_rwlock_unlock)
547 weak_extern (__pthread_key_create)
548 weak_extern (__pthread_setspecific)
549 weak_extern (__pthread_getspecific)
550 weak_extern (__pthread_once)
551 //weak_extern (__pthread_initialize)
552 weak_extern (__pthread_atfork)
553 weak_extern (_pthread_cleanup_push_defer)
554 weak_extern (_pthread_cleanup_pop_restore)
555 weak_extern (pthread_setcancelstate)
556 # else
557 #  pragma weak __pthread_mutex_init
558 #  pragma weak __pthread_mutex_destroy
559 #  pragma weak __pthread_mutex_lock
560 #  pragma weak __pthread_mutex_trylock
561 #  pragma weak __pthread_mutex_unlock
562 #  pragma weak __pthread_mutexattr_init
563 #  pragma weak __pthread_mutexattr_destroy
564 #  pragma weak __pthread_mutexattr_settype
565 #  pragma weak __pthread_rwlock_destroy
566 #  pragma weak __pthread_rwlock_rdlock
567 #  pragma weak __pthread_rwlock_tryrdlock
568 #  pragma weak __pthread_rwlock_wrlock
569 #  pragma weak __pthread_rwlock_trywrlock
570 #  pragma weak __pthread_rwlock_unlock
571 #  pragma weak __pthread_key_create
572 #  pragma weak __pthread_setspecific
573 #  pragma weak __pthread_getspecific
574 #  pragma weak __pthread_once
575 //#  pragma weak __pthread_initialize
576 #  pragma weak __pthread_atfork
577 #  pragma weak _pthread_cleanup_push_defer
578 #  pragma weak _pthread_cleanup_pop_restore
579 #  pragma weak pthread_setcancelstate
580 # endif
581 #endif
582
583 #endif  /* bits/libc-lock.h */