OSDN Git Service

mass sync with glibc nptl
authorAustin Foxley <austinf@cetoncorp.com>
Tue, 16 Feb 2010 20:27:18 +0000 (12:27 -0800)
committerAustin Foxley <austinf@cetoncorp.com>
Tue, 16 Feb 2010 20:27:18 +0000 (12:27 -0800)
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
299 files changed:
.gitignore
include/atomic.h
libpthread/nptl/.gitignore
libpthread/nptl/ChangeLog
libpthread/nptl/DESIGN-barrier.txt [new file with mode: 0644]
libpthread/nptl/DESIGN-condvar.txt [new file with mode: 0644]
libpthread/nptl/DESIGN-rwlock.txt [new file with mode: 0644]
libpthread/nptl/DESIGN-sem.txt [new file with mode: 0644]
libpthread/nptl/Makefile.in
libpthread/nptl/TODO [new file with mode: 0644]
libpthread/nptl/TODO-kernel [new file with mode: 0644]
libpthread/nptl/TODO-testing [new file with mode: 0644]
libpthread/nptl/allocatestack.c
libpthread/nptl/cancellation.c
libpthread/nptl/descr.h
libpthread/nptl/forward.c
libpthread/nptl/init.c
libpthread/nptl/libc-cancellation.c
libpthread/nptl/pt-cleanup.c
libpthread/nptl/pt-system.c
libpthread/nptl/pthread-errnos.sym
libpthread/nptl/pthreadP.h
libpthread/nptl/pthread_atfork.c
libpthread/nptl/pthread_attr_destroy.c
libpthread/nptl/pthread_attr_getdetachstate.c
libpthread/nptl/pthread_attr_init.c
libpthread/nptl/pthread_attr_setschedparam.c
libpthread/nptl/pthread_attr_setstack.c
libpthread/nptl/pthread_barrier_destroy.c
libpthread/nptl/pthread_barrier_init.c
libpthread/nptl/pthread_cancel.c
libpthread/nptl/pthread_cond_destroy.c
libpthread/nptl/pthread_cond_init.c
libpthread/nptl/pthread_condattr_getclock.c
libpthread/nptl/pthread_condattr_setclock.c
libpthread/nptl/pthread_create.c
libpthread/nptl/pthread_getattr_np.c
libpthread/nptl/pthread_getschedparam.c
libpthread/nptl/pthread_join.c
libpthread/nptl/pthread_key_create.c
libpthread/nptl/pthread_mutex_consistent.c [new file with mode: 0644]
libpthread/nptl/pthread_mutex_destroy.c
libpthread/nptl/pthread_mutex_getprioceiling.c [new file with mode: 0644]
libpthread/nptl/pthread_mutex_init.c
libpthread/nptl/pthread_mutex_lock.c
libpthread/nptl/pthread_mutex_setprioceiling.c [new file with mode: 0644]
libpthread/nptl/pthread_mutex_timedlock.c
libpthread/nptl/pthread_mutex_trylock.c
libpthread/nptl/pthread_mutex_unlock.c
libpthread/nptl/pthread_mutexattr_getprioceiling.c [new file with mode: 0644]
libpthread/nptl/pthread_mutexattr_getprotocol.c [new file with mode: 0644]
libpthread/nptl/pthread_mutexattr_getpshared.c
libpthread/nptl/pthread_mutexattr_getrobust.c [new file with mode: 0644]
libpthread/nptl/pthread_mutexattr_gettype.c
libpthread/nptl/pthread_mutexattr_init.c
libpthread/nptl/pthread_mutexattr_setprioceiling.c [new file with mode: 0644]
libpthread/nptl/pthread_mutexattr_setprotocol.c [new file with mode: 0644]
libpthread/nptl/pthread_mutexattr_setpshared.c
libpthread/nptl/pthread_mutexattr_setrobust.c [new file with mode: 0644]
libpthread/nptl/pthread_mutexattr_settype.c
libpthread/nptl/pthread_rwlock_init.c
libpthread/nptl/pthread_rwlock_tryrdlock.c
libpthread/nptl/pthread_rwlock_trywrlock.c
libpthread/nptl/pthread_setschedparam.c
libpthread/nptl/pthread_setschedprio.c
libpthread/nptl/pthread_setspecific.c
libpthread/nptl/pthread_timedjoin.c
libpthread/nptl/pthread_tryjoin.c
libpthread/nptl/res.c
libpthread/nptl/sem_close.c
libpthread/nptl/sem_destroy.c
libpthread/nptl/sem_getvalue.c
libpthread/nptl/sem_init.c
libpthread/nptl/sem_open.c
libpthread/nptl/sem_unlink.c
libpthread/nptl/semaphoreP.h
libpthread/nptl/sysdeps/arm/tls.h
libpthread/nptl/sysdeps/generic/lowlevellock.h
libpthread/nptl/sysdeps/i386/pthread_spin_lock.c
libpthread/nptl/sysdeps/i386/tcb-offsets.sym
libpthread/nptl/sysdeps/i386/tls.h
libpthread/nptl/sysdeps/powerpc/tcb-offsets.sym
libpthread/nptl/sysdeps/powerpc/tls.h
libpthread/nptl/sysdeps/pthread/Makefile.in
libpthread/nptl/sysdeps/pthread/allocalim.h
libpthread/nptl/sysdeps/pthread/bits/libc-lock.h
libpthread/nptl/sysdeps/pthread/bits/sigthread.h
libpthread/nptl/sysdeps/pthread/bits/stdio-lock.h
libpthread/nptl/sysdeps/pthread/createthread.c
libpthread/nptl/sysdeps/pthread/librt-cancellation.c
libpthread/nptl/sysdeps/pthread/list.h
libpthread/nptl/sysdeps/pthread/malloc-machine.h
libpthread/nptl/sysdeps/pthread/pt-initfini.c
libpthread/nptl/sysdeps/pthread/pt-longjmp.c
libpthread/nptl/sysdeps/pthread/pthread-functions.h
libpthread/nptl/sysdeps/pthread/pthread.h
libpthread/nptl/sysdeps/pthread/pthread_barrier_wait.c
libpthread/nptl/sysdeps/pthread/pthread_cond_broadcast.c
libpthread/nptl/sysdeps/pthread/pthread_cond_signal.c
libpthread/nptl/sysdeps/pthread/pthread_cond_timedwait.c
libpthread/nptl/sysdeps/pthread/pthread_cond_wait.c
libpthread/nptl/sysdeps/pthread/pthread_once.c
libpthread/nptl/sysdeps/pthread/pthread_rwlock_rdlock.c
libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedrdlock.c
libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedwrlock.c
libpthread/nptl/sysdeps/pthread/pthread_rwlock_unlock.c
libpthread/nptl/sysdeps/pthread/pthread_rwlock_wrlock.c
libpthread/nptl/sysdeps/pthread/pthread_spin_destroy.c
libpthread/nptl/sysdeps/pthread/setxid.h
libpthread/nptl/sysdeps/pthread/sigaction.c
libpthread/nptl/sysdeps/pthread/sigfillset.c
libpthread/nptl/sysdeps/pthread/tpp.c [new file with mode: 0644]
libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
libpthread/nptl/sysdeps/pthread/unwind-resume.c
libpthread/nptl/sysdeps/sh/tcb-offsets.sym
libpthread/nptl/sysdeps/sh/tls.h
libpthread/nptl/sysdeps/sparc/tcb-offsets.sym
libpthread/nptl/sysdeps/sparc/tls.h
libpthread/nptl/sysdeps/unix/sysv/linux/Makefile.in
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions [deleted file]
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/local_lim.h
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/alpha/vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c
libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c
libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h
libpthread/nptl/sysdeps/unix/sysv/linux/bits/local_lim.h
libpthread/nptl/sysdeps/unix/sysv/linux/bits/posix_opt.h
libpthread/nptl/sysdeps/unix/sysv/linux/fork.c
libpthread/nptl/sysdeps/unix/sysv/linux/fork.h
libpthread/nptl/sysdeps/unix/sysv/linux/getpid.c
libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/internaltypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/jmp-unwind.c
libpthread/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelbarrier.sym
libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelcond.sym
libpthread/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrwlock.sym
libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/mq_notify.c
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions [deleted file]
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions [deleted file]
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
libpthread/nptl/sysdeps/unix/sysv/linux/pt-fork.c
libpthread/nptl/sysdeps/unix/sysv/linux/pt-raise.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_getaffinity.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getaffinity.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getcpuclockid.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/raise.c
libpthread/nptl/sysdeps/unix/sysv/linux/register-atfork.c
libpthread/nptl/sysdeps/unix/sysv/linux/sem_post.c
libpthread/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
libpthread/nptl/sysdeps/unix/sysv/linux/sem_wait.c
libpthread/nptl/sysdeps/unix/sysv/linux/sh/Makefile.arch
libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/sh/fork.c
libpthread/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h
libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sh/not-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pt-initfini.c
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/sh/vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/smp.h
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Makefile.arch
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions [deleted file]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/local_lim.h
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.c
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_once.c
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions [deleted file]
libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym [new file with mode: 0644]
libpthread/nptl/sysdeps/unix/sysv/linux/timer_create.c
libpthread/nptl/sysdeps/unix/sysv/linux/timer_delete.c
libpthread/nptl/sysdeps/unix/sysv/linux/timer_getoverr.c
libpthread/nptl/sysdeps/unix/sysv/linux/timer_routines.c
libpthread/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions [deleted file]
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
libpthread/nptl/sysdeps/x86_64/Makefile [deleted file]
libpthread/nptl/sysdeps/x86_64/tcb-offsets.sym
libpthread/nptl/sysdeps/x86_64/tls.h
libpthread/nptl/unwind.c
libpthread/nptl/vars.c
libpthread/nptl/version.c
librt/kernel-posix-timers.h

index 5ceb817..c2603a0 100644 (file)
@@ -17,6 +17,7 @@ install_dir/
 .config*
 .*.dep
 /*.log
+cscope.*
 
 #
 # Debugging files
index aff4120..6383572 100644 (file)
@@ -1,5 +1,5 @@
 /* Internal macros for atomic operations for GNU C Library.
-   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #ifndef _ATOMIC_H
 #define _ATOMIC_H      1
 
+/* This header defines three types of macros:
+
+   - atomic arithmetic and logic operation on memory.  They all
+     have the prefix "atomic_".
+
+   - conditionally atomic operations of the same kinds.  These
+     always behave identically but can be faster when atomicity
+     is not really needed since only one thread has access to
+     the memory location.  In that case the code is slower in
+     the multi-thread case.  The interfaces have the prefix
+     "catomic_".
+
+   - support functions like barriers.  They also have the prefix
+     "atomic_".
+
+   Architectures must provide a few lowlevel macros (the compare
+   and exchange definitions).  All others are optional.  They
+   should only be provided if the architecture has specific
+   support for the operation.
+
+   As <atomic.h> macros are usually heavily nested and often use local
+   variables to make sure side-effects are evaluated properly, use for
+   macro local variables a per-macro unique prefix.  This file uses
+   __atgN_ prefix where N is different in each macro.  */
+
 #include <stdlib.h>
 
 #include <bits/atomic.h>
    and following args.  */
 #define __atomic_val_bysize(pre, post, mem, ...)                             \
   ({                                                                         \
-    __typeof (*mem) __result;                                                \
+    __typeof (*mem) __atg1_result;                                           \
     if (sizeof (*mem) == 1)                                                  \
-      __result = pre##_8_##post (mem, __VA_ARGS__);                          \
+      __atg1_result = pre##_8_##post (mem, __VA_ARGS__);                     \
     else if (sizeof (*mem) == 2)                                             \
-      __result = pre##_16_##post (mem, __VA_ARGS__);                         \
+      __atg1_result = pre##_16_##post (mem, __VA_ARGS__);                    \
     else if (sizeof (*mem) == 4)                                             \
-      __result = pre##_32_##post (mem, __VA_ARGS__);                         \
+      __atg1_result = pre##_32_##post (mem, __VA_ARGS__);                    \
     else if (sizeof (*mem) == 8)                                             \
-      __result = pre##_64_##post (mem, __VA_ARGS__);                         \
+      __atg1_result = pre##_64_##post (mem, __VA_ARGS__);                    \
     else                                                                     \
       abort ();                                                                      \
-    __result;                                                                \
+    __atg1_result;                                                           \
   })
 #define __atomic_bool_bysize(pre, post, mem, ...)                            \
   ({                                                                         \
-    int __result;                                                            \
+    int __atg2_result;                                                       \
     if (sizeof (*mem) == 1)                                                  \
-      __result = pre##_8_##post (mem, __VA_ARGS__);                          \
+      __atg2_result = pre##_8_##post (mem, __VA_ARGS__);                     \
     else if (sizeof (*mem) == 2)                                             \
-      __result = pre##_16_##post (mem, __VA_ARGS__);                         \
+      __atg2_result = pre##_16_##post (mem, __VA_ARGS__);                    \
     else if (sizeof (*mem) == 4)                                             \
-      __result = pre##_32_##post (mem, __VA_ARGS__);                         \
+      __atg2_result = pre##_32_##post (mem, __VA_ARGS__);                    \
     else if (sizeof (*mem) == 8)                                             \
-      __result = pre##_64_##post (mem, __VA_ARGS__);                         \
+      __atg2_result = pre##_64_##post (mem, __VA_ARGS__);                    \
     else                                                                     \
       abort ();                                                                      \
-    __result;                                                                \
+    __atg2_result;                                                           \
   })
 
 
 #endif
 
 
+#ifndef catomic_compare_and_exchange_val_acq
+# ifdef __arch_c_compare_and_exchange_val_32_acq
+#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq,                \
+                      mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
+# endif
+#endif
+
+
+#ifndef catomic_compare_and_exchange_val_rel
+# ifndef atomic_compare_and_exchange_val_rel
+#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval)          \
+  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval)          \
+  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
+# endif
+#endif
+
+
 #ifndef atomic_compare_and_exchange_val_rel
 # define atomic_compare_and_exchange_val_rel(mem, newval, oldval)            \
   atomic_compare_and_exchange_val_acq (mem, newval, oldval)
 #  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
   __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq,                \
                        mem, newval, oldval)
-#  else
-#   define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+# else
+#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  ({ /* Cannot use __oldval here, because macros later in this file might     \
+       call this macro with __oldval argument.  */                           \
+     __typeof (oldval) __atg3_old = (oldval);                                \
+     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old)           \
+       != __atg3_old;                                                        \
+  })
+# endif
+#endif
+
+
+#ifndef catomic_compare_and_exchange_bool_acq
+# ifdef __arch_c_compare_and_exchange_bool_32_acq
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq,                      \
+                       mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
   ({ /* Cannot use __oldval here, because macros later in this file might     \
        call this macro with __oldval argument.  */                           \
-     __typeof (oldval) __old = (oldval);                                     \
-     atomic_compare_and_exchange_val_acq (mem, newval, __old) != __old;              \
+     __typeof (oldval) __atg4_old = (oldval);                                \
+     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old)          \
+       != __atg4_old;                                                        \
   })
 # endif
 #endif
 
 
+#ifndef catomic_compare_and_exchange_bool_rel
+# ifndef atomic_compare_and_exchange_bool_rel
+#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval)         \
+  catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval)         \
+  atomic_compare_and_exchange_bool_rel (mem, newval, oldval)
+# endif
+#endif
+
+
 #ifndef atomic_compare_and_exchange_bool_rel
 # define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
   atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
 /* Store NEWVALUE in *MEM and return the old value.  */
 #ifndef atomic_exchange_acq
 # define atomic_exchange_acq(mem, newvalue) \
-  ({ __typeof (*(mem)) __oldval;                                             \
-     __typeof (mem) __memp = (mem);                                          \
-     __typeof (*(mem)) __value = (newvalue);                                 \
+  ({ __typeof (*(mem)) __atg5_oldval;                                        \
+     __typeof (mem) __atg5_memp = (mem);                                     \
+     __typeof (*(mem)) __atg5_value = (newvalue);                            \
                                                                              \
      do                                                                              \
-       __oldval = (*__memp);                                                 \
-     while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp,   \
-                                                                   __value,  \
-                                                                   __oldval),\
-                             0));                                            \
+       __atg5_oldval = *__atg5_memp;                                         \
+     while (__builtin_expect                                                 \
+           (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
+                                                  __atg5_oldval), 0));       \
                                                                              \
-     __oldval; })
+     __atg5_oldval; })
 #endif
 
 #ifndef atomic_exchange_rel
 /* Add VALUE to *MEM and return the old value of *MEM.  */
 #ifndef atomic_exchange_and_add
 # define atomic_exchange_and_add(mem, value) \
-  ({ __typeof (*(mem)) __oldval;                                             \
-     __typeof (mem) __memp = (mem);                                          \
-     __typeof (*(mem)) __value = (value);                                    \
+  ({ __typeof (*(mem)) __atg6_oldval;                                        \
+     __typeof (mem) __atg6_memp = (mem);                                     \
+     __typeof (*(mem)) __atg6_value = (value);                               \
                                                                              \
      do                                                                              \
-       __oldval = (*__memp);                                                 \
-     while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp,   \
-                                                                   __oldval  \
-                                                                   + __value,\
-                                                                   __oldval),\
-                             0));                                            \
+       __atg6_oldval = *__atg6_memp;                                         \
+     while (__builtin_expect                                                 \
+           (atomic_compare_and_exchange_bool_acq (__atg6_memp,               \
+                                                  __atg6_oldval              \
+                                                  + __atg6_value,            \
+                                                  __atg6_oldval), 0));       \
                                                                              \
-     __oldval; })
+     __atg6_oldval; })
+#endif
+
+
+#ifndef catomic_exchange_and_add
+# define catomic_exchange_and_add(mem, value) \
+  ({ __typeof (*(mem)) __atg7_oldv;                                          \
+     __typeof (mem) __atg7_memp = (mem);                                     \
+     __typeof (*(mem)) __atg7_value = (value);                               \
+                                                                             \
+     do                                                                              \
+       __atg7_oldv = *__atg7_memp;                                           \
+     while (__builtin_expect                                                 \
+           (catomic_compare_and_exchange_bool_acq (__atg7_memp,              \
+                                                   __atg7_oldv               \
+                                                   + __atg7_value,           \
+                                                   __atg7_oldv), 0));        \
+                                                                             \
+     __atg7_oldv; })
+#endif
+
+
+#ifndef atomic_max
+# define atomic_max(mem, value) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg8_oldval;                                         \
+    __typeof (mem) __atg8_memp = (mem);                                              \
+    __typeof (*(mem)) __atg8_value = (value);                                \
+    do {                                                                     \
+      __atg8_oldval = *__atg8_memp;                                          \
+      if (__atg8_oldval >= __atg8_value)                                     \
+       break;                                                                \
+    } while (__builtin_expect                                                \
+            (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value,\
+                                                   __atg8_oldval), 0));      \
+  } while (0)
+#endif
+
+
+#ifndef catomic_max
+# define catomic_max(mem, value) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg9_oldv;                                           \
+    __typeof (mem) __atg9_memp = (mem);                                              \
+    __typeof (*(mem)) __atg9_value = (value);                                \
+    do {                                                                     \
+      __atg9_oldv = *__atg9_memp;                                            \
+      if (__atg9_oldv >= __atg9_value)                                       \
+       break;                                                                \
+    } while (__builtin_expect                                                \
+            (catomic_compare_and_exchange_bool_acq (__atg9_memp,             \
+                                                    __atg9_value,            \
+                                                    __atg9_oldv), 0));       \
+  } while (0)
+#endif
+
+
+#ifndef atomic_min
+# define atomic_min(mem, value) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg10_oldval;                                        \
+    __typeof (mem) __atg10_memp = (mem);                                     \
+    __typeof (*(mem)) __atg10_value = (value);                               \
+    do {                                                                     \
+      __atg10_oldval = *__atg10_memp;                                        \
+      if (__atg10_oldval <= __atg10_value)                                   \
+       break;                                                                \
+    } while (__builtin_expect                                                \
+            (atomic_compare_and_exchange_bool_acq (__atg10_memp,             \
+                                                   __atg10_value,            \
+                                                   __atg10_oldval), 0));     \
+  } while (0)
 #endif
 
 
 #endif
 
 
+#ifndef catomic_add
+# define catomic_add(mem, value) \
+  (void) catomic_exchange_and_add ((mem), (value))
+#endif
+
+
 #ifndef atomic_increment
 # define atomic_increment(mem) atomic_add ((mem), 1)
 #endif
 
 
+#ifndef catomic_increment
+# define catomic_increment(mem) catomic_add ((mem), 1)
+#endif
+
+
 #ifndef atomic_increment_val
 # define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
 #endif
 
 
+#ifndef catomic_increment_val
+# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
+#endif
+
+
 /* Add one to *MEM and return true iff it's now zero.  */
 #ifndef atomic_increment_and_test
 # define atomic_increment_and_test(mem) \
 #endif
 
 
+#ifndef catomic_decrement
+# define catomic_decrement(mem) catomic_add ((mem), -1)
+#endif
+
+
 #ifndef atomic_decrement_val
 # define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
 #endif
 
 
+#ifndef catomic_decrement_val
+# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
+#endif
+
+
 /* Subtract 1 from *MEM and return true iff it's now zero.  */
 #ifndef atomic_decrement_and_test
 # define atomic_decrement_and_test(mem) \
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #ifndef atomic_decrement_if_positive
 # define atomic_decrement_if_positive(mem) \
-  ({ __typeof (*(mem)) __oldval;                                             \
-     __typeof (mem) __memp = (mem);                                          \
+  ({ __typeof (*(mem)) __atg11_oldval;                                       \
+     __typeof (mem) __atg11_memp = (mem);                                    \
                                                                              \
      do                                                                              \
        {                                                                     \
-        __oldval = *__memp;                                                  \
-        if (__builtin_expect (__oldval <= 0, 0))                             \
+        __atg11_oldval = *__atg11_memp;                                      \
+        if (__builtin_expect (__atg11_oldval <= 0, 0))                       \
           break;                                                             \
        }                                                                     \
-     while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp,   \
-                                                                   __oldval  \
-                                                                   - 1,      \
-                                                                   __oldval),\
-                             0));\
-     __oldval; })
+     while (__builtin_expect                                                 \
+           (atomic_compare_and_exchange_bool_acq (__atg11_memp,              \
+                                                  __atg11_oldval - 1,        \
+                                                  __atg11_oldval), 0));      \
+     __atg11_oldval; })
 #endif
 
 
 #ifndef atomic_add_negative
 # define atomic_add_negative(mem, value)                                     \
-  ({ __typeof (value) __aan_value = (value);                                 \
-     atomic_exchange_and_add (mem, __aan_value) < -__aan_value; })
+  ({ __typeof (value) __atg12_value = (value);                               \
+     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
 #endif
 
 
 #ifndef atomic_add_zero
 # define atomic_add_zero(mem, value)                                         \
-  ({ __typeof (value) __aaz_value = (value);                                 \
-     atomic_exchange_and_add (mem, __aaz_value) == -__aaz_value; })
+  ({ __typeof (value) __atg13_value = (value);                               \
+     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
 #endif
 
 
 
 #ifndef atomic_bit_test_set
 # define atomic_bit_test_set(mem, bit) \
-  ({ __typeof (*(mem)) __oldval;                                             \
-     __typeof (mem) __memp = (mem);                                          \
-     __typeof (*(mem)) __mask = ((__typeof (*(mem))) 1 << (bit));            \
+  ({ __typeof (*(mem)) __atg14_old;                                          \
+     __typeof (mem) __atg14_memp = (mem);                                    \
+     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit));              \
                                                                              \
      do                                                                              \
-       __oldval = (*__memp);                                                 \
-     while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp,   \
-                                                                   __oldval  \
-                                                                   | __mask, \
-                                                                   __oldval),\
-                             0));                                            \
+       __atg14_old = (*__atg14_memp);                                        \
+     while (__builtin_expect                                                 \
+           (atomic_compare_and_exchange_bool_acq (__atg14_memp,              \
+                                                  __atg14_old | __atg14_mask,\
+                                                  __atg14_old), 0));         \
                                                                              \
-     __oldval & __mask; })
+     __atg14_old & __atg14_mask; })
 #endif
 
+/* Atomically *mem &= mask.  */
+#ifndef atomic_and
+# define atomic_and(mem, mask) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg15_old;                                           \
+    __typeof (mem) __atg15_memp = (mem);                                     \
+    __typeof (*(mem)) __atg15_mask = (mask);                                 \
+                                                                             \
+    do                                                                       \
+      __atg15_old = (*__atg15_memp);                                         \
+    while (__builtin_expect                                                  \
+          (atomic_compare_and_exchange_bool_acq (__atg15_memp,               \
+                                                 __atg15_old & __atg15_mask, \
+                                                 __atg15_old), 0));          \
+  } while (0)
+#endif
+
+#ifndef catomic_and
+# define catomic_and(mem, mask) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg20_old;                                           \
+    __typeof (mem) __atg20_memp = (mem);                                     \
+    __typeof (*(mem)) __atg20_mask = (mask);                                 \
+                                                                             \
+    do                                                                       \
+      __atg20_old = (*__atg20_memp);                                         \
+    while (__builtin_expect                                                  \
+          (catomic_compare_and_exchange_bool_acq (__atg20_memp,              \
+                                                  __atg20_old & __atg20_mask,\
+                                                  __atg20_old), 0));         \
+  } while (0)
+#endif
+
+/* Atomically *mem &= mask and return the old value of *mem.  */
+#ifndef atomic_and_val
+# define atomic_and_val(mem, mask) \
+  ({ __typeof (*(mem)) __atg16_old;                                          \
+     __typeof (mem) __atg16_memp = (mem);                                    \
+     __typeof (*(mem)) __atg16_mask = (mask);                                \
+                                                                             \
+     do                                                                              \
+       __atg16_old = (*__atg16_memp);                                        \
+     while (__builtin_expect                                                 \
+           (atomic_compare_and_exchange_bool_acq (__atg16_memp,              \
+                                                  __atg16_old & __atg16_mask,\
+                                                  __atg16_old), 0));         \
+                                                                             \
+     __atg16_old; })
+#endif
+
+/* Atomically *mem |= mask.  */
+#ifndef atomic_or
+# define atomic_or(mem, mask) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg17_old;                                           \
+    __typeof (mem) __atg17_memp = (mem);                                     \
+    __typeof (*(mem)) __atg17_mask = (mask);                                 \
+                                                                             \
+    do                                                                       \
+      __atg17_old = (*__atg17_memp);                                         \
+    while (__builtin_expect                                                  \
+          (atomic_compare_and_exchange_bool_acq (__atg17_memp,               \
+                                                 __atg17_old | __atg17_mask, \
+                                                 __atg17_old), 0));          \
+  } while (0)
+#endif
+
+#ifndef catomic_or
+# define catomic_or(mem, mask) \
+  do {                                                                       \
+    __typeof (*(mem)) __atg18_old;                                           \
+    __typeof (mem) __atg18_memp = (mem);                                     \
+    __typeof (*(mem)) __atg18_mask = (mask);                                 \
+                                                                             \
+    do                                                                       \
+      __atg18_old = (*__atg18_memp);                                         \
+    while (__builtin_expect                                                  \
+          (catomic_compare_and_exchange_bool_acq (__atg18_memp,              \
+                                                  __atg18_old | __atg18_mask,\
+                                                  __atg18_old), 0));         \
+  } while (0)
+#endif
+
+/* Atomically *mem |= mask and return the old value of *mem.  */
+#ifndef atomic_or_val
+# define atomic_or_val(mem, mask) \
+  ({ __typeof (*(mem)) __atg19_old;                                          \
+     __typeof (mem) __atg19_memp = (mem);                                    \
+     __typeof (*(mem)) __atg19_mask = (mask);                                \
+                                                                             \
+     do                                                                              \
+       __atg19_old = (*__atg19_memp);                                        \
+     while (__builtin_expect                                                 \
+           (atomic_compare_and_exchange_bool_acq (__atg19_memp,              \
+                                                  __atg19_old | __atg19_mask,\
+                                                  __atg19_old), 0));         \
+                                                                             \
+     __atg19_old; })
+#endif
 
 #ifndef atomic_full_barrier
 # define atomic_full_barrier() __asm__ ("" ::: "memory")
 #endif
 
 
+#ifndef atomic_forced_read
+# define atomic_forced_read(x) \
+  ({ __typeof (x) __x; __asm__ ("" : "=r" (__x) : "0" (x)); __x; })
+#endif
+
+
 #ifndef atomic_delay
 # define atomic_delay() do { /* nothing */ } while (0)
 #endif
index e20fba8..483263e 100644 (file)
@@ -11,7 +11,10 @@ tcb-offsets.[hcs]
 lowlevelbarrier.[hcs]
 lowlevelcond.[hcs]
 lowlevelrwlock.[hcs]
+lowlevelrobustlock.[hcs]
 unwindbuf.[hcs]
+structsem.[hcs]
+pthread-pi-defines.[hcs]
 sysdeps/pthread/pt-sigaction.c
 sysdeps/pthread/pt-sigfillset.c
 sysdeps/pthread/pt-sigprocmask.c
index 92bd22a..c81eb03 100644 (file)
+2010-01-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
+       Fix unwind info.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
+
+2010-01-15  Michal Schmidt  <mschmidt@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
+       Fix pthread_cond_timedwait with requeue-PI.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S:
+       Fix pthread_cond_wait with requeue-PI.
+
+2010-01-14  Ulrich Drepper  <drepper@redhat.com>
+
+       * Versions: Add pthread_mutex_consistent, pthread_mutexattr_getrobust,
+       and pthread_mutexattr_setrobust for GLIBC_2.12.
+       * pthread_mutex_consistent.c: Define alias pthread_mutex_consistent.
+       * pthread_mutexattr_getrobust.c: Define alias
+       pthread_mutexattr_getrobust.
+       * pthread_mutexattr_setrobust.c: Define alias
+       pthread_mutexattr_setrobust.
+
+2010-01-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h: Cleanup.  Fix up for XPG7.
+
+2010-01-08  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h: Fix pthread_mutex_consistent declaration.
+
+2009-12-18  Thomas Schwinge  <thomas@codesourcery.com>
+
+       * sysdeps/unix/sysv/linux/s390/s390-32/pt-initfini.c (_init): Don't
+       call __gmon_start__.
+       * sysdeps/unix/sysv/linux/s390/s390-64/pt-initfini.c (_init): Likewise.
+
+2009-12-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_rwlock_init.c (__pthread_rwlock_init): Simplify code by
+       using memset.
+
+2009-12-01  Dinakar Guniguntala  <dino@in.ibm.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.h: Define
+       FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: If mutex
+       is a non robust PI mutex, then use FUTEX_CMP_REQUEUE_PI.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: If mutex
+       is a non robust PI mutex, then use FUTEX_WAIT_REQUEUE_PI.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
+
+2009-12-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S (sem_timedwait):
+       Don't update nwaiters after invalid timeout is recognized.
+
+2009-11-27  Thomas Schwinge  <thomas@codesourcery.com>
+
+       * sysdeps/unix/sysv/linux/sh/pt-initfini.c (_init): Don't call
+       __gmon_start__.
+
+2009-11-27  Andreas Schwab  <schwab@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/cancellation.S: Reload
+       THREAD_SELF->cancelhandling after returning from futex call.
+
+2009-11-24  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-sem13.c: New file.
+       * Makefile (tests): Add tst-sem13.
+
+2009-11-22  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/dl-sysdep.h: # include "i686/dl-sysdep.h"
+       instead of recapitulating its contents.
+
+2009-11-18  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Minor
+       optimizations and cleanups.
+
+2009-11-18  Dinakar Guniguntala  <dino@in.ibm.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S:
+       Remove redundant code. Fix cfi offsets.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S:
+       Fix cfi offsets.
+
+2009-11-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Minimally
+       reduce size of unwind info.
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Convert to use
+       cfi directives.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+       Based on a patch by Dinakar Guniguntala <dino@in.ibm.com>.
+
+2009-11-03  Andreas Schwab  <schwab@linux-m68k.org>
+
+       [BZ #4457]
+       * sysdeps/pthread/unwind-resume.c: Include <libgcc_s.h> and use
+       LIBGCC_S_SO.
+       * sysdeps/pthread/unwind-forcedunwind.c: Likewise.
+
+2009-10-30  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-sem11.c (main): Rewrite to avoid aliasing problems.
+
+       [BZ #3270]
+       * allocatestack.c (__nptl_setxid): Perform the operation in multiple
+       steps to avoid races with creation and terminations.
+       * nptl-init.c (sighandler_setxid): Adjust.
+       Patch by Daniel Jacobowitz.
+
+2009-09-07  Andreas Schwab  <schwab@redhat.com>
+
+       * sysdeps/pthread/bits/libc-lock.h (BP_SYM): Remove space before paren.
+
+2009-09-02  Suzuki K P  <suzuki@in.ibm.com>
+           Joseph Myers  <joseph@codesourcery.com>
+
+       [BZ #7094]
+       * sysdeps/unix/sysv/linux/timer_create.c (timer_create):
+       Initialize the sigev_notify field for newly created timer to make sure
+       the timer gets deleted from the active timer's list upon timer_delete.
+
+2009-08-27  Andrew Stubbs  <ams@codesourcery.com>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S (__lll_timedlock_wait):
+       Correct a logic error.
+
+2009-08-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/tls.h (RTLD_ENABLE_FOREIGN_CALL): Store old value
+       of the field in local variables.
+       (RTLD_FINALIZE_FOREIGN_CALL): Restore rtld_must_xmm_save from local
+       variable and don't unconditionally clear it.
+
+2009-08-24  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_create.c (start_thread): Hint to the kernel that memory for
+       the stack can be reused.  We do not mark all the memory.  The part
+       still in use and some reserve are kept.
+
+2009-08-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h: Clean up namespace.
+
+2009-08-11  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Add CFI
+       directives.
+
+2009-08-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Add CFI
+       directives.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Likewise.
+
+2009-08-10  Andreas Schwab  <schwab@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+       (__pthread_cond_signal): Don't clobber register used for syscall
+       number.
+
+2009-08-08  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S (sem_timedwait):
+       Optimize code path used when FUTEX_CLOCK_REALTIME is supported.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+       (__pthread_cond_wait): Optimize by avoiding use of callee-safe
+       register.
+
+2009-08-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Little optimizations
+       enabled by the special *_asynccancel functions.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+
+       * sysdeps/unix/sysv/linux/x86_64/cancellation.S: Include lowlevellock.h.
+
+2009-08-04  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/cancellation.S: New file.
+       * sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S: New file.
+       * sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S: New file.
+       * sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h (PSEUDO): Optimize
+       since we can assume the special __*_{en,dis}able_asynccancel
+       functions.
+       (PUSHARGS_*, POPARGS_*, SAVESTK_*, RESTSTK_*): Removed.
+       * sysdeps/x86_64/tcb-offsets.sym: Add cancellation-related bits
+       and PTHREAD_CANCELED.
+
+2009-07-31  Ulrich Drepper  <drepper@redhat.com>
+
+       * descr.h: Better definition of *_BITMASK macros for cancellation.
+
+2009-07-29  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/tls.h (TLS_TCB_ALIGN): Define explicitly to 32.
+
+       * sysdeps/x86_64/tls.h (tcbhead_t): Add room for SSE registers the
+       dynamic linker might have to save.
+       Define RTLD_CHECK_FOREIGN_CALL, RTLD_ENABLE_FOREIGN_CALL,
+       RTLD_PREPARE_FOREIGN_CALL, and RTLD_FINALIZE_FOREIGN_CALL.  Pretty
+       printing.
+
+       * sysdeps/x86_64/tcb-offsets.sym: Add RTLD_SAVESPACE_SSE.
+
+2009-07-28  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_mutex_lock.c [NO_INCR] (__pthread_mutex_cond_lock_adjust):
+       New function.
+       * pthreadP.h: Declare __pthread_mutex_cond_lock_adjust.
+       * sysdeps/unix/sysv/linux/pthread-pi-defines.sym: Add ROBUST_BIT.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Don't use
+       requeue_pi for robust mutexes.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+       Don't only skip __pthread_mutex_cond_lock.  Call instead
+       __pthread_mutex_cond_lock_adjust.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
+
+       * pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Minor
+       optimization of PI mutex handling.
+
+2009-07-27  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #10418]
+       * pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Use _rel
+       instead of _acq variants of cmpxchg.
+
+2009-07-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/configure.in: New file.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Fix error
+       path when not using absolute timeout futex.
+
+2009-07-20  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Minor
+       optimizations of last changes.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
+
+2009-07-19  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Define
+       FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: If mutex
+       is a PI mutex, then use FUTEX_CMP_REQUEUE_PI.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: If mutex
+       is a PI mutex, then use FUTEX_WAIT_REQUEUE_PI.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+       (__pthread_cond_timedwait): Make more robust.
+
+2009-07-18  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
+       (__lll_robust_timedlock_wait): If possible use FUTEX_WAIT_BITSET to
+       directly use absolute timeout.
+
+       * tst-sem5.c (do_test): Add test for premature timeout.
+       * Makefile: Link tst-sem5 with librt.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
+       (pthread_rwlock_timedwrlock): If possible use FUTEX_WAIT_BITSET to
+       directly use absolute timeout.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
+       (pthread_rwlock_timedrdlock): Likewise.
+
+       * tst-cond11.c (run_test): Add test to check that the timeout is
+       long enough.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+       (__pthread_cond_timedwait): If possible use FUTEX_WAIT_BITSET to
+       directly use absolute timeout.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+       (__pthread_cond_wait): Convert to using exception handler instead of
+       registered unwind buffer.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+       (__pthread_cond_timedwait): Likewise.
+
+2009-07-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S (sem_timedwait):
+       If possible use FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME to directly
+       use absolute timeout.
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S (sem_wait): Optimize
+       handling of uncontested semaphore.
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+       (__condvar_cleanup): Rewrite to use cfi directives instead of
+       hand-coded unwind tables.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_once.S (__pthread_once):
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S (sem_wait): Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S (sem_timedwait):
+       Likewise.
+
+2009-06-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * Makefile (libpthread-routines): Add pthread_sigqueue.
+       * Versions: Add pthread_sigqueue for GLIBC_2.11.
+       * sysdeps/pthread/bits/sigthread.h: Declare pthread_sigqueue.
+       * sysdeps/unix/sysv/linux/pthread_sigqueue.c: New file.
+
+2009-06-11  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #10262]
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+       (LOAD_FUTEX_WAIT_ABS): Fix futex parameter in case private futexes
+       cannot be assumed.
+       Patch by Bryan Kadzban <bz-glibc@kdzbn.homelinux.net>.
+
+2009-05-16  Ulrich Drepper  <drepper@redhat.com>
+
+       * libc-cancellation.c: Move __libc_cleanup_routine to...
+       * libc-cleanup.c: ...here.  New file.
+       * Makefile (routines): Add libc-cleanup.
+
+       * cancellation.c (__pthread_disable_asynccancel): Remove unnecessary
+       test.
+       * libc-cancellation.c: Use <nptl/cancellation.c> to define the code.
+       * sysdeps/pthread/librt-cancellation.c: Likewise.
+
+       [BZ #9924]
+       * nptl-init.c: Renamed from init.c.
+       * Makefile: Change all occurrences of init.c to nptl-init.c.
+
+2009-05-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * cancellation.c (__pthread_disable_asynccancel): Correct the bits
+       to test when deciding on the delay.
+       * libc-cancellation.c (__libc_disable_asynccancel): Likewise.
+       * pthread_cancel.c: Close race between deciding on sending a signal
+       and setting the CANCELING_BIT bit.
+
+       * cancellation.c (__pthread_disable_asynccancel): Don't return if
+       thread is canceled.
+       * libc-cancellation.c (__libc_disable_asynccancel): Likewise.
+
+2009-04-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * cancellation.c (__pthread_disable_asynccancel): Use THREAD_ATOMIC_AND
+       is available.
+       * libc-cancellation.c (__libc_disable_asynccancel): Likewise.
+       * sysdeps/x86_64/tls.h: Define THREAD_ATOMIC_AND.
+       * sysdeps/i386/tls.h: Likewise.
+       (tcbhead_t): Add __private_tm member.
+
+2009-04-26  Ulrich Drepper  <drepper@redhat.com>
+
+       * sem_open.c (sem_open): Rewrite initialization of initsem to
+       avoid warnings.
+
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c (__libc_pthread_init):
+       Avoid warning by using may_alias attribute on ptrhack.
+
+2009-04-22  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #10090]
+       * pthread_attr_setschedparam.c (__pthread_attr_setschedparam):
+       Check policy and priority for validity.
+       Patch mostly by Zhang Xiliang <zhangxiliang@cn.fujitsu.com>.
+
+2009-03-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+       (__pthread_cond_timedwait): Change to use cfi directives instead of
+       hand-coded unwind sections.
+
+2009-03-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * init.c (nptl_freeres): Compile only for SHARED.
+
+2009-03-09  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Define
+       FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET, FUTEX_CLOCK_REALTIME and
+       FUTEX_BITSET_MATCH_ANY.
+
+2009-02-27  Roland McGrath  <roland@redhat.com>
+
+       * init.c (__nptl_initial_report_events): Mark __attribute_used__.
+       * pthread_create.c (__nptl_threads_events, __nptl_last_event): Likewise.
+
+2009-02-26  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
+       _POSIX_THREAD_ROBUST_PRIO_INHERIT and
+       _POSIX_THREAD_ROBUST_PRIO_PROTECT.  Reset value of macros from
+       200112L to 200809L.
+
+2009-02-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h: The robust mutex functions are in
+       POSIX 2008.
+
+2009-02-24  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h (_BITS_POSIX_OPT_H):
+       Unify name of include protector macro.
+
+2009-02-14  SUGIOKA Toshinobu  <sugioka@itonet.co.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S: Define
+       LOAD_FUTEX_WAIT_ABS even if (FUTEX_WAIT == 0).
+
+2009-01-29  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/unwind-forcedunwind.c: Encrypt all function
+       pointer variables.
+
+       * allocatestack.c (__free_stacks): Renamed from free_stacks.
+       (__free_stack_cache): Removed.  Change callers to call __free_stacks.
+       * init.c (nptl_freeres): New function.
+       (pthread_functions): Initialize ptr_freeres to nptl_freeres.
+       * pthreadP.h: Don't declare __free_stack_cache.  Declare __free_stacks.
+       * sysdeps/pthread/unwind-forcedunwind.c (libgcc_s_handle): New
+       variable.
+       (pthread_cancel_init): Depend in libgcc_s_handle for decision to
+       load DSO.  Assign last.
+       (__unwind_freeres): New function.
+
+       * allocatestack.c (__reclaim_stacks): Reset in_flight_stack later
+       for better debugging.  No need to use stack_list_add here.
+
+2009-01-14  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S
+       (__lll_timedlock_wait): Use FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME
+       instead of computing relative timeout.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Define
+       FUTEX_CLOCK_REALTIME and FUTEX_BITSET_MATCH_ANY.
+
+2009-01-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_mutex_lock.c (__pthread_mutex_lock): Remove unused label out.
+
+2009-01-08  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/list.h (list_add): Initialize new element first.
+       (list_add_tail): Removed.
+
+2009-01-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (in_flight_stack): New variable.
+       (stack_list_del): New function.  Use instead of list_del.
+       (stack_list_add): New function.  Use instead of list_add when adding to
+       stack_cache and stack_used lists.
+       (__reclaim_stacks): Complete operations on stack_cache and stack_used lists
+       when the fork call interrupted another thread.
+
+2009-01-04  Ulrich Drepper  <drepper@redhat.com>
+
+       * init.c (__pthread_initialize_minimal_internal): Optimize test
+       FUTEX_CLOCK_REALTIME a bit.
+
+2009-01-03  Ulrich Drepper  <drepper@redhat.com>
+
+       * init.c (__pthread_initialize_minimal_internal): Cheat a bit by
+       only passing five parameters to FUTEX_WAIT_BITSET call.
+
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+       (__lll_timedlock_wait): Use FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME
+       instead of computing relative timeout.
+
+2009-01-02  Ulrich Drepper  <drepper@redhat.com>
+
+       * init.c (__pthread_initialize_minimal_internal): Check for
+       FUTEX_CLOCK_REALTIME flag.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S (__lll_timedlock_wait):
+       Use FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME instead of computing
+       relative timeout.
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Define
+       FUTEX_CLOCK_REALTIME and FUTEX_BITSET_MATCH_ANY.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+
+2008-12-09  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h (pthread_cleanup_pop): Use { } as empty
+       loop body instead of ; to avoid gcc warnings.
+       (pthread_cleanup_pop_restore_np): Likewise.
+       Patch by Caolán McNamara <caolanm@redhat.com>.
+
+2008-12-09  Jakub Jelinek  <jakub@redhat.com>
+
+       * pthread_mutex_lock.c (__pthread_mutex_lock): Handle only the
+       fast path here, for robust/PI/PP mutexes call
+       __pthread_mutex_lock_full.  Don't use switch, instead use a series
+       of ifs according to their probability.
+       (__pthread_mutex_lock_full): New function.
+       * pthread_mutex_unlock.c: Include assert.h.
+       (__pthread_mutex_unlock_usercnt): Handle only the
+       fast path here, for robust/PI/PP mutexes call
+       __pthread_mutex_unlock_full.  Don't use switch, instead use a series
+       of ifs according to their probability.
+       (__pthread_mutex_unlock_full): New function.
+       * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
+       (__pthread_mutex_lock_full): Define.
+
+2008-12-08  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/tls.h (tcbhead_t): Add fields reserved for TM
+       implementation.  Add necessary padding.
+       * descr.h (struct pthread): Increase padding for tcbhead_t to 24
+       words.
+
+2008-12-04  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Define FUTEX_WAIT_BITSET
+       and FUTEX_WAKE_BITSET.
+
+2008-12-02  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Define FUTEX_WAIT_BITSET
+       and FUTEX_WAKE_BITSET.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+
+2008-11-25  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/alpha, sysdeps/unix/sysv/linux/alpha:
+       Subdirectories moved to ports repository as
+       sysdeps/.../nptl subdirectories.
+
+2008-11-12  Jakub Jelinek  <jakub@redhat.com>
+
+       [BZ #7008]
+       * pthread_condattr_setclock.c (pthread_condattr_setclock): Fix masking
+       of old value.
+       * pthread_cond_init.c (__pthread_cond_init): Fix
+       cond->__data.__nwaiters initialization.
+       * Makefile (tests): Add tst-cond23.
+       * tst-cond23.c: New test.
+
+2008-11-07  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/pthread/malloc-machine.h (MALLOC): Adjust __libc_tsd_define
+       arguments.
+       (tsd_setspecific, tsd_getspecific): Adjust __libc_tsd_{set,get}
+       arguments.
+
+2008-11-01  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #6955]
+       * pthread_mutex_lock.c: Add support for private PI mutexes.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       Patch mostly by Ben Jackson <ben@ben.com>.
+
+2008-10-31  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #6843]
+       * sysdeps/pthread/gai_misc.h (__gai_create_helper_thread):
+       Increase stack size for helper thread.
+
+2008-10-06  Martin Schwidefsky  <schwidefsky@de.ibm.com>
+
+       * sysdeps/s390/tls.h (THREAD_SET_STACK_GUARD): Add empty inline
+       assembly with a clobber list for access registers a0 and a1.
+
+2008-09-11  Martin Schwidefsky  <schwidefsky@de.ibm.com>
+
+       * sysdeps/unix/sysv/linux/fork.c (__libc_fork): Add memory barrier
+       to force runp->refcntr to be read from memory.
+
+2008-09-08  Richard Guenther  <rguenther@suse.de>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_lock,
+       lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
+       lll_timedlock, lll_robust_timedlock, lll_unlock,
+       lll_robust_unlock): Promote private to int.
+
+2008-08-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/pthreaddef.h: Remove ARCH_MAP_FLAGS and
+       ARCH_RETRY_MMAP definitions.
+       * allocatestack.c: Remove definition of ARCH_MAP_FLAGS.
+       Define MAP_STACK when not defined.
+       (allocate_stack): Use MAP_STACK instead of ARCH_MAP_FLAGS.  Remove
+       handling of ARCH_RETRY_MMAP.
+
+2008-07-30  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-align2.c (f): Print message that f is reached.
+
+2008-04-28  Hiroki Kaminaga  <kaminaga@sm.sony.co.jp>
+
+       [BZ #6740]
+       * sysdeps/powerpc/tcb-offsets.sym (PRIVATE_FUTEX_OFFSET): Guard symbol
+       definition with #ifndef __ASSUME_PRIVATE_FUTEX.
+
+2008-07-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/mq_notify.c (init_mq_netlink): Use
+       SOCK_CLOEXEC if possible.
+
+2008-05-29  Ulrich Drepper  <drepper@redhat.com>
+
+       * Makefile (tests): Add tst-rwlock2a.
+       * tst-rwlock2.c: Use TYPE macro to decide what rwlock type to use.
+       * tst-rwlock2a.c: New file.
+
+2008-06-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h: Remove inadvertent checkin.
+
+2008-05-17  Samuel Thibault  <samuel.thibault@ens-lyon.org>
+
+       * sysdeps/pthread/pthread.h: Fix typo in comment.
+
+2008-05-28  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/createthread.c (do_clone): Pass accurate length
+       of CPU set to the kernel.
+
+2008-05-23  Paul Pluzhnikov  <ppluzhnikov@google.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Add
+       cfi directives.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Likewise.
+
+2008-05-22  Paul Pluzhnikov  <ppluzhnikov@google.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S: Add
+       cfi directives.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+       Likewise.
+
+2008-05-26  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-typesizes.c: Explicitly check __SIZEOF_PTHREAD_* constants.
+
+2008-05-20  Jakub Jelinek  <jakub@redhat.com>
+
+       David S. Miller  <davem@davemloft.net>
+
+       * sysdeps/unix/sysv/linux/sparc/sparc64/Makefile: New file.
+
+2008-05-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Access
+       __pshared correctly.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S:
+       Likewise.
+       Reported by Clemens Kolbitsch <clemens.kol@gmx.at>.
+
+2008-04-14  David S. Miller  <davem@davemloft.net>
+
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c
+       (__old_sem_wait): Fix argument to lll_futex_wait().
+
+2007-11-26  Daniel Jacobowitz  <dan@codesourcery.com>
+
+       * pthread_create.c: Require pthread_mutex_trylock and
+       pthread_key_delete for libgcc.
+
+2008-04-08  Jakub Jelinek  <jakub@redhat.com>
+
+       [BZ #6020]
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+       (lll_futex_wake_unlock): Add private argument to the pre-v9 macro.
+       Patch by Sunil Amitkumar Janki <devel.sjanki@gmail.com>.
+
+2008-03-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/bits/local_lim.h: Undefine ARG_MAX if
+       <linux/limits.h> has defined it.
+       * sysdeps/unix/sysv/linux/alpha/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/bits/local_lim.h: Likewise.
+
+2008-03-18  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/ia64/dl-sysdep.h: Use __ASSEMBLER__ instead
+       of ASSEMBLER.
+       * sysdeps/unix/sysv/linux/i386/i686/dl-sysdep.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/dl-sysdep.h: Likewise.
+
+2008-03-14  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/dl-sysdep.h: Define
+       HAVE_DL_DISCOVER_OSVERSION.
+       * sysdeps/unix/sysv/linux/i386/i686/dl-sysdep.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/dl-sysdep.h: Likewise.
+
+2008-03-07  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #5778]
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h: Change
+       _POSIX_CHOWN_RESTRICTED value to zero.
+
+2008-01-31  Roland McGrath  <roland@redhat.com>
+
+       * Makefile (omit-deps): Variable removed.
+
+2008-01-30  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_post.S (sem_post): Avoid
+       unnecessary addr32 prefix.
+
+2008-01-29  Roland McGrath  <roland@redhat.com>
+
+       * Makeconfig (ptw-CPPFLAGS, sysd-rules-patterns): New variables.
+
+2008-01-22  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/sem_post.S: Don't overflow value field.
+
+2008-01-21  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h (XADD): Use
+       a scratch register.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S
+       (__lll_lock_wait_private): Fix typo.
+       * sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
+       (pthread_barrier_wait): Likewise.  Adjust XADD use.
+       * sysdeps/unix/sysv/linux/sh/sem_post.S (__new_sem_post):
+       Adjust XADD use.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
+       (pthread_rwlock_timedrdlock): Return correct return value.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
+       (pthread_rwlock_timedwrlock): Likewise.
+
+2008-01-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-eintr2.c (do_test): Make sure that if mutex_lock in the main
+       thread returns, the program exits with an error code.
+
+2008-01-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread-errnos.sym: Add EOVERFLOW.
+       * sysdeps/unix/sysv/linux/structsem.sym: Add SEM_VALUE_MAX.
+       * sysdeps/unix/sysv/linux/sem_post.c: Don't overflow value field.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_post.S: Likewise.
+
+2007-12-14  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/pthreaddef.h (ARCH_RETRY_MMAP): Take additional
+       parameter.  Passed it as permission to mmap.
+       * allocatestack.c (allocate_stack): Pass prot as second parameter
+       to ARCH_RETRY_MMAP.
+
+2007-12-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-basic7.c: Allocate memory for the stack.
+
+       [BZ #5465]
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S [!SHARED]
+       (__pthread_cond_timedwait): Don't use VDSO.
+       Patch by Michal Januszewski.
+
+2007-12-07  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #5455]
+       * sysdeps/pthread/pthread.h [!__EXCEPTIONS] (pthread_cleanup_pop):
+       Allow label before pthread_cleanup_pop.
+       (pthread_cleanup_pop_restore_np): Likewise.
+
+2007-12-04  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S (__lll_timedlock_wait):
+       Store 2 before returning ETIMEDOUT.
+
+2007-11-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S (__lll_timedlock_wait):
+       Store 2 before returning ETIMEDOUT.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevellock.c: Likewise.
+       (__lll_lock_wait_private): Optimize.
+       (__lll_lock_wait): Likewise.
+
+2007-11-20  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/pthread/pthread.h (pthread_cleanup_push,
+       pthread_cleanup_push_defer_np): Add extra (void *) cast to shut up
+       g++ 4.1 and 4.2 -Wstrict-aliasing warnings.
+
+2007-11-08  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #5240]
+       * sysdeps/unix/sysv/linux/lowlevellock.c (__lll_timedlock_wait):
+       If we time out, try one last time to lock the futex to avoid
+       losing a wakeup signal.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
+
+       [BZ #5245]
+       * sysdeps/pthread/createthread.c (do_clone): Translate clone error
+       if necessary.
+
+2007-11-07  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #5245]
+       * allocatestack.c (allocate_stack): Change ENOMEM error in case
+       mmap failed to EAGAIN.
+       * Makefile (tests): Add tst-basic7.
+       * tst-basic7.c: New file.
+
+2007-11-05  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork):
+       Use __linkin_atfork.
+
+2007-11-03  Mike Frysinger  <vapier@gentoo.org>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S (LOAD_FUTEX_WAIT): Add
+       missing line continuations.
+       * sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S (LOAD_FUTEX_WAIT,
+       LOAD_FUTEX_WAKE): Likewise.  Also add missing 3rd parameter.
+
+2007-10-28  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #5220]
+       * sysdeps/unix/sysv/linux/kernel-posix-timers.h: Declare
+       __active_timer_sigev_thread and __active_timer_sigev_thread_lock.
+       (struct timer): Add next element.
+       * sysdeps/unix/sysv/linux/timer_create.c: For SIGEV_THREAD timers,
+       enqueue timer structure into __active_timer_sigev_thread list.
+       * sysdeps/unix/sysv/linux/timer_delete.c: For SIGEV_THREAD timers,
+       remove timer struct from __active_timer_sigev_thread.
+       * sysdeps/unix/sysv/linux/timer_routines.c (timer_helper_thread):
+       Before using timer structure make sure it is still on the
+       __active_timer_sigev_thread list.  Keep lock until done.
+       Define __active_timer_sigev_thread and
+       __active_timer_sigev_thread_lock.
+
+2007-10-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/malloc-machine.h: Define ATFORK_MEM.
+       Redefine thread_atfork for use of ATFORK_MEM.
+       * sysdeps/unix/sysv/linux/fork.h: Define __linkin_atfork.
+       * sysdeps/unix/sysv/linux/register-atfork.c (__linkin_atfork): New
+       function.
+       * sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork):
+       Use atomic operation when removing first element of list.
+
+2007-10-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/sem_post.S (__old_sem_post): New
+       routine instead of an alias to __new_sem_post.
+
+2007-10-15  Jakub Jelinek  <jakub@redhat.com>
+
+       * init.c (__pthread_initialize_minimal): Initialize word to appease
+       valgrind.
+
+2007-10-10  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/pthread/bits/libc-lock.h (__libc_rwlock_init): Inside of
+       libc.so just clear NAME.
+       (__libc_rwlock_fini): Nop inside of libc.so.
+       * tst-initializers1.c (main): Test if PTHREAD_RWLOCK_INITIALIZER is
+       all zeros.
+
+2007-09-02  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+       (__pthread_cond_wait): Fix unlocking of internal lock after mutex
+       unlocking failed.
+       Patch by Luca Barbieri <luca.barbieri@gmail.com>.
+
+2007-08-21  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #4938]
+       * allocatestack.c (__reclaim_stacks): Clear the TSD in the
+       reclaimed stack if necessary.
+       * Makefile (tests): Add tst-tsd6.
+       * tst-tsd6.c: New file.
+
+2007-08-21  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_robust_dead):
+       Add private argument.
+
+2007-08-20  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+       (__pthread_cond_timedwait): Use clock_gettime from VDSO if possible.
+
+2007-08-16  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h
+       (__lll_robust_timedlock): Pass private as last argument to
+       __lll_robust_timedlock_wait.
+       (__lll_unlock): Fix a pasto.
+
+2007-08-15  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/internaltypes.h (sparc_new_sem,
+       sparc_old_sem): New structs.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c
+       (__sem_wait_cleanup): New function.
+       (__new_sem_wait): Use sparc_new_sem structure.  Bump and afterwards
+       decrease nwaiters.  Register __sem_wait_cleanup as cleanup handler.
+       Pass isem->private ^ FUTEX_PRIVATE_FLAG as last argument to
+       lll_futex_wait.
+       (__old_sem_wait): New function.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_wait.c: Include
+       nptl/sysdeps/unix/sysv/linux/sparc version.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_timedwait.c:
+       Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_post.c: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c
+       (__new_sem_trywait): Use sparc_old_sem structure.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
+       (sem_timedwait): Use sparc_new_sem structure.  Bump and afterwards
+       decrease nwaiters.  Register __sem_wait_cleanup as cleanup handler.
+       Pass isem->private ^ FUTEX_PRIVATE_FLAG as last argument to
+       lll_futex_timed_wait.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c (__new_sem_post):
+       Use sparc_new_sem structure.  Only wake if nwaiters > 0.  Pass
+       isem->private ^ FUTEX_PRIVATE_FLAG as last argument to
+       lll_futex_wake.
+       (__old_sem_post): New function.
+       * sysdeps/unix/sysv/linux/sparc/sem_wait.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sem_init.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sem_timedwait.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sem_post.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_init.c: Remove.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_init.c: Remove.
+
+2007-08-14  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
+       (__pthread_cond_broadcast): Pass LLL_PRIVATE to lll_* and or
+       FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+       Don't use FUTEX_CMP_REQUEUE if dep_mutex is not process private.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
+       (__pthread_cond_signal): Pass LLL_PRIVATE to lll_* and or
+       FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+       Use FUTEX_WAKE_OP.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Include
+       kernel-features.h and tcb-offsets.h.
+       (__pthread_cond_wait, __condvar_w_cleanup): Pass LLL_PRIVATE to
+       lll_* and or FUTEX_PRIVATE_FLAG into SYS_futex op if cv is
+       process private.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Include
+       tcb-offsets.h.
+       (__pthread_cond_timedwait, __condvar_tw_cleanup): Pass LLL_PRIVATE
+       to lll_* and or FUTEX_PRIVATE_FLAG into SYS_futex op if cv is
+       process private.
+       * sysdeps/unix/sysv/linux/sh/pthread_once.S: Use #ifdef
+       __ASSUME_PRIVATE_FUTEX instead of #if __ASSUME_PRIVATE_FUTEX.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S: Likewise.
+
+2007-08-14  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/lowlevellock.c: Comment fix.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
+       (__lll_timedwait_tid): Pass LLL_SHARED as 4th argument to
+       lll_futex_timed_wait.
+
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (__lll_unlock,
+       __lll_robust_unlock): Rewrite as macros instead of inline functions.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h (__lll_unlock,
+       __lll_robust_unlock, __lll_wait_tid): Likewise.
+
+2007-08-13  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h (__lll_private_flag):
+       Fix a pasto.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
+       (__pthread_cond_broadcast): Pass LLL_PRIVATE to lll_* and or
+       FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+       Don't use FUTEX_CMP_REQUEUE if dep_mutex is not process private.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
+       (__pthread_cond_signal): Pass LLL_PRIVATE to lll_* and or
+       FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Include
+       kernel-features.h.
+       (__pthread_cond_wait, __condvar_w_cleanup): Pass LLL_PRIVATE to
+       lll_* and or FUTEX_PRIVATE_FLAG into SYS_futex op if cv is
+       process private.  Switch DW_CFA_advance_loc1 and some
+       DW_CFA_advance_loc .eh_frame opcodes to DW_CFA_advance_loc4.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
+       (__pthread_cond_timedwait, __condvar_tw_cleanup): Pass LLL_PRIVATE to
+       lll_* and or FUTEX_PRIVATE_FLAG into SYS_futex op if cv is
+       process private.  Switch DW_CFA_advance_loc{1,2} and some
+       DW_CFA_advance_loc .eh_frame opcodes to DW_CFA_advance_loc4.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Use
+       #ifdef __ASSUME_PRIVATE_FUTEX instead of #if __ASSUME_PRIVATE_FUTEX.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
+       (__pthread_cond_broadcast): Compare %r8 instead of
+       dep_mutex-cond_*(%rdi) with $-1.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+       (__pthread_cond_signal): Xor FUTEX_WAKE_OP with FUTEX_WAKE instead
+       of oring.
+
+2007-08-13  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i786/Implies: New file.
+
+2007-08-13  Jakub Jelinek  <jakub@redhat.com>
+
+       * allocatestack.c: Include kernel-features.h.
+       * pthread_create.c: Likewise.
+       * pthread_mutex_init.c: Likewise.
+       * init.c: Likewise.
+       * pthread_cond_timedwait.c: Likewise.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Likewise.
+
+2007-08-12  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
+       [__WORDSIZE=32] (pthread_rwlock_t): Split __flags element into four
+       byte elements.  One of them is the new __shared element.
+       [__WORDSIZE=64] (pthread_rwlock_t): Renamed __pad1 element to __shared,
+       adjust names of other padding elements.
+       * sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h
+       [__WORDSIZE=32] (pthread_rwlock_t): Split __flags element into four
+       byte elements.  One of them is the new __shared element.
+       [__WORDSIZE=64] (pthread_rwlock_t): Renamed __pad1 element to __shared,
+       adjust names of other padding elements.
+       * sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h (pthread_rwlock_t):
+       Renamed __pad1 element to __shared, adjust names of other padding
+       elements.
+       * sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
+       (pthread_rwlock_t): Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h (__lll_lock): Fix a
+       typo.
+
+2007-08-09  Anton Blanchard  <anton@samba.org>
+
+       * sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c: New file.
+
+2007-08-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Include
+       <kernel-features.h>.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
+
+2007-08-11  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthreadP.h (PTHREAD_ROBUST_MUTEX_PSHARED): Define.
+       * pthread_mutex_lock.c: Use it instead of PTHREAD_MUTEX_PSHARED when
+       dealing with robust mutexes.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c: Likewise.
+
+2007-08-06  Jakub Jelinek  <jakub@redhat.com>
+
+       * pthreadP.h (PTHREAD_MUTEX_PSHARED_BIT): Define.
+       (PTHREAD_MUTEX_TYPE): Mask __kind with 127.
+       (PTHREAD_MUTEX_PSHARED): Define.
+       * pthread_mutex_init.c (__pthread_mutex_init): Set
+       PTHREAD_MUTEX_PSHARED_BIT for pshared or robust
+       mutexes.
+       * pthread_mutex_lock.c (LLL_MUTEX_LOCK): Take mutex as argument
+       instead of its __data.__lock field, pass PTHREAD_MUTEX_PSHARED
+       as second argument to lll_lock.
+       (LLL_MUTEX_TRYLOCK): Take mutex as argument
+       instead of its __data.__lock field.
+       (LLL_ROBUST_MUTEX_LOCK): Take mutex as argument instead of its
+       __data.__lock field, pass PTHREAD_MUTEX_PSHARED as second argument
+       to lll_robust_lock.
+       (__pthread_mutex_lock): Update LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
+       LLL_ROBUST_MUTEX_LOCK users, use PTHREAD_MUTEX_TYPE (mutex)
+       instead of mutex->__data.__kind directly, pass
+       PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock and lll_futex_wait.
+       * pthread_mutex_trylock.c (__pthread_mutex_trylock): Use
+       PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind
+       directly, pass PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock.
+       (pthread_mutex_timedlock): Pass PTHREAD_MUTEX_PSHARED (mutex)
+       to lll_timedlock, lll_robust_timedlock, lll_unlock and
+       lll_futex_timed_wait.  Use PTHREAD_MUTEX_TYPE (mutex) instead
+       of mutex->__data.__kind directly.
+       * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Pass
+       PTHREAD_MUTEX_PSHARED (mutex) to lll_timedlock,
+       lll_robust_timedlock, lll_unlock and lll_futex_timed_wait.  Use
+       PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind directly.
+       * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Pass
+       PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock, lll_robust_unlock
+       and lll_futex_wake.
+       * pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling): Pass
+       PTHREAD_MUTEX_PSHARED (mutex) to lll_futex_wait and lll_futex_wake.
+       Use PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind
+       directly.
+       * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK):
+       Take mutex as argument instead of its __data.__lock field, pass
+       PTHREAD_MUTEX_PSHARED as second argument to lll_cond_lock.
+       (LLL_MUTEX_TRYLOCK): Take mutex as argument instead of its
+       __data.__lock field.
+       (LLL_ROBUST_MUTEX_LOCK): Take mutex as argument instead of its
+       __data.__lock field, pass PTHREAD_MUTEX_PSHARED as second argument
+       to lll_robust_cond_lock.
+       * pthread_cond_broadcast.c (__pthread_cond_broadcast): Add pshared
+       variable, pass it to lll_lock, lll_unlock, lll_futex_requeue and
+       lll_futex_wake.  Don't use lll_futex_requeue if dependent mutex
+       has PTHREAD_MUTEX_PSHARED_BIT bit set in its __data.__kind.
+       * pthread_cond_destroy.c (__pthread_cond_destroy): Add pshared
+       variable, pass it to lll_lock, lll_unlock, lll_futex_wake and
+       lll_futex_wait.
+       * pthread_cond_signal.c (__pthread_cond_signal): Add pshared
+       variable, pass it to lll_lock, lll_unlock, lll_futex_wake_unlock and
+       lll_futex_wake.
+       * pthread_cond_timedwait.c (__pthread_cond_timedwait): Add
+       pshared variable, pass it to lll_lock, lll_unlock,
+       lll_futex_timedwait and lll_futex_wake.
+       * pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait): Add
+       pshared variable, pass it to lll_lock, lll_unlock, lll_futex_wait
+       and lll_futex_wake.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_futex_requeue,
+       lll_futex_wake_unlock): Add private argument, use __lll_private_flag
+       macro.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h (lll_futex_requeue,
+       lll_futex_wake_unlock): Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h (lll_futex_requeue):
+       Likewise.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_futex_requeue,
+       lll_futex_wake_unlock): Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_futex_requeue):
+       Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h (lll_futex_requeue,
+       lll_futex_wake_unlock): Likewise.
+       (lll_futex_wake): Fix a typo.
+       * sysdeps/unix/sysv/linux/pthread-pi-defines.sym (PS_BIT): Add.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
+       (__pthread_cond_broadcast): Pass LLL_PRIVATE to lll_* and or
+       FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+       Don't use FUTEX_CMP_REQUEUE if dep_mutex is not process private.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+       (__pthread_cond_signal): Pass LLL_PRIVATE to lll_* and or
+       FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+       (__pthread_cond_timedwait): Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:
+       (__condvar_cleanup, __pthread_cond_wait): Likewise.
+
+2007-08-05  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h (PSEUDO):
+       Don't use CGOTSETUP and CGOTRESTORE macros.
+       (CGOTSETUP, CGOTRESTORE): Remove.
+       <IS_IN_rtld> (CENABLE, CDISABLE): Don't use JUMPTARGET, branch to
+       @local symbol.
+
+2007-08-01  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S: Remove
+       definitions for private futexes.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S: Include
+       kernel-features.h and lowlevellock.h.  Use private futexes if
+       they are available.
+       (__lll_lock_wait_private, __lll_unlock_wake_private): New.
+       (__lll_mutex_lock_wait): Rename to
+       (__lll_lock_wait): ... this.  Don't compile in for libc.so.
+       (__lll_mutex_timedlock_wait): Rename to ...
+       (__lll_timedlock_wait): ... this.  Use __NR_gettimeofday.
+       Don't compile in for libc.so.
+       (__lll_mutex_unlock_wake): Rename to ...
+       (__lll_unlock_wake): ... this.  Don't compile in for libc.so.
+       (__lll_timedwait_tid): Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Allow including
+       the header from assembler.  Renamed all lll_mutex_* resp.
+       lll_robust_mutex_* macros to lll_* resp. lll_robust_*.
+       Renamed all LLL_MUTEX_LOCK_* macros to LLL_LOCK_*.
+       (FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE):
+       Define.
+       (__lll_lock_wait_private): Add prototype.
+       (__lll_lock_wait, __lll_timedlock_wait, __lll_robust_lock_wait,
+       __lll_robust_timedlock_wait, __lll_unlock_wake_private,
+       __lll_unlock_wake): Likewise.
+       (lll_lock): Add private argument.  Call __lll_lock_wait_private
+       if private is constant LLL_PRIVATE.
+       (lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
+       lll_timedlock, lll_robust_timedlock): Add private argument.
+       (lll_unlock): Add private argument.  Call __lll_unlock_wake_private
+       if private is constant LLL_PRIVATE.
+       (lll_robust_unlock, lll_robust_dead): Add private argument.
+       (lll_lock_t): Remove.
+       (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
+       __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
+       lll_cond_wake, lll_cond_broadcast): Remove.
+       * sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S: Include
+       kernel-features.h and lowlevellock.h.
+       (SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Remove.
+       (LOAD_FUTEX_WAIT): Define.
+       (__lll_robust_mutex_lock_wait): Rename to ...
+       (__lll_robust_lock_wait): ... this.  Add private argument.
+       Use LOAD_FUTEX_WAIT macro.
+       (__lll_robust_mutex_timedlock_wait): Rename to ...
+       (__lll_robust_timedlock_wait): ... this.  Add private argument.
+       Use __NR_gettimeofday.  Use LOAD_FUTEX_WAIT macro.
+       * sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Remove.
+       (pthread_barrier_wait): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S: Include
+       lowlevellock.h and pthread-errnos.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
+       FUTEX_CMP_REQUEUE, EINVAL): Remove.
+       (__pthread_cond_broadcast): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S: Include
+       lowlevellock.h and pthread-errnos.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, EINVAL): Remove.
+       (__pthread_cond_signal): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Include
+       lowlevellock.h.
+       (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE): Remove.
+       (__pthread_cond_timedwait): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.  Use __NR_gettimeofday.
+       (__condvar_tw_cleanup): Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Remove.
+       (__pthread_cond_wait): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       (__condvar_w_cleanup): Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_once.S: Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
+       (__pthread_rwlock_rdlock): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S: Include
+       lowlevellock.h.
+       (SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE,
+       FUTEX_PRIVATE_FLAG): Remove.
+       (pthread_rwlock_timedrdlock): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.  Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S: Include
+       lowlevellock.h.
+       (SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE,
+       FUTEX_PRIVATE_FLAG): Remove.
+       (pthread_rwlock_timedwrlock): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.  Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
+       (__pthread_rwlock_unlock): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
+       (__pthread_rwlock_wrlock): Use __lll_{lock,unlock}_* instead of
+       __lll_mutex_{lock,unlock}_*.
+       * sysdeps/unix/sysv/linux/sh/sem_post.S: Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
+       (__new_sem_post): Use standard initial exec code sequences.
+       * sysdeps/unix/sysv/linux/sh/sem_timedwait.S: Include
+       lowlevellock.h.
+       (SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE,
+       FUTEX_PRIVATE_FLAG): Remove.
+       (sem_timedwait): Use __NR_gettimeofday.  Use standard initial
+       exec code sequences.
+       * sysdeps/unix/sysv/linux/sh/sem_trywait.S: Include lowlevellock.h.
+       (__new_sem_trywait): Use standard initial exec code sequences.
+       * sysdeps/unix/sysv/linux/sh/sem_wait.S: Include lowlevellock.h.
+       (__new_sem_wait): Use standard initial exec code sequences.
+
+2007-07-31  Anton Blanchard  <anton@samba.org>
+
+       * sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
+       Use __asm __volatile (__lll_acq_instr ::: "memory") instead of
+       atomic_full_barrier.
+
+2007-07-31  Jakub Jelinek  <jakub@redhat.com>
+
+       * allocatestack.c (stack_cache_lock): Change type to int.
+       (get_cached_stack, allocate_stack, __deallocate_stack,
+       __make_stacks_executable, __find_thread_by_id, __nptl_setxid,
+       __pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE
+       as second argument to lll_lock and lll_unlock macros on
+       stack_cache_lock.
+       * pthread_create.c (__find_in_stack_list): Likewise.
+       (start_thread): Similarly with pd->lock.  Use lll_robust_dead
+       macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it
+       as second argument.
+       * descr.h (struct pthread): Change lock and setxid_futex field
+       type to int.
+       * old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use
+       LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER.
+       * old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise.
+       * old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0):
+       Likewise.
+       * old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise.
+       * pthread_cond_init.c (__pthread_cond_init): Likewise.
+       * pthreadP.h (__attr_list_lock): Change type to int.
+       * pthread_attr_init.c (__attr_list_lock): Likewise.
+       * pthread_barrier_destroy.c (pthread_barrier_destroy): Pass
+       ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to
+       lll_{,un}lock.
+       * pthread_barrier_wait.c (pthread_barrier_wait): Likewise and
+       also for lll_futex_{wake,wait}.
+       * pthread_barrier_init.c (pthread_barrier_init): Make iattr
+       a pointer to const.
+       * pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass
+       LLL_SHARED as second argument to lll_{,un}lock.
+       * pthread_cond_destroy.c (__pthread_cond_destroy): Likewise.
+       * pthread_cond_signal.c (__pthread_cond_signal): Likewise.
+       * pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
+       * pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait):
+       Likewise.
+       * pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE
+       as second argument to lll_{,un}lock macros on pd->lock.
+       * pthread_getschedparam.c (__pthread_getschedparam): Likewise.
+       * pthread_setschedparam.c (__pthread_setschedparam): Likewise.
+       * pthread_setschedprio.c (pthread_setschedprio): Likewise.
+       * tpp.c (__pthread_tpp_change_priority, __pthread_current_priority):
+       Likewise.
+       * sysdeps/pthread/createthread.c (do_clone, create_thread):
+       Likewise.
+       * pthread_once.c (once_lock): Change type to int.
+       (__pthread_once): Pass LLL_PRIVATE as second argument to
+       lll_{,un}lock macros on once_lock.
+       * pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use
+       lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass
+       rwlock->__data.__shared as second argument to them and similarly
+       for lll_futex_w*.
+       * pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock):
+       Likewise.
+       * pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock):
+       Likewise.
+       * pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
+       * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise.
+       * pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise.
+       * pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise.
+       * sem_close.c (sem_close): Pass LLL_PRIVATE as second argument
+       to lll_{,un}lock macros on __sem_mappings_lock.
+       * sem_open.c (check_add_mapping): Likewise.
+       (__sem_mappings_lock): Change type to int.
+       * semaphoreP.h (__sem_mappings_lock): Likewise.
+       * pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
+       LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros
+       instead of lll_*mutex_*, pass LLL_SHARED as last
+       argument.
+       (__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock,
+       pass LLL_SHARED as last argument.
+       * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK,
+       LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use
+       lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*,
+       pass LLL_SHARED as last argument.
+       * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use
+       lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass
+       LLL_SHARED as last argument.
+       * pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly.
+       * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt):
+       Similarly.
+       * sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock,
+       __libc_lock_lock_recursive, __libc_lock_unlock,
+       __libc_lock_unlock_recursive): Pass LLL_PRIVATE as second
+       argument to lll_{,un}lock.
+       * sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock,
+       _IO_lock_unlock): Likewise.
+       * sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use
+       compound literal.
+       * sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork):
+       Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on
+       __fork_lock.
+       * sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork,
+       free_mem): Likewise.
+       (__fork_lock): Change type to int.
+       * sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise.
+       * sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass
+       isem->private ^ FUTEX_PRIVATE_FLAG as second argument to
+       lll_futex_wake.
+       * sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise.
+       * sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise.
+       * sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private):
+       New function.
+       (__lll_lock_wait, __lll_timedlock_wait): Add private argument and
+       pass it through to lll_futex_*wait, only compile in when
+       IS_IN_libpthread.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+       (__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private
+       argument and pass it through to lll_futex_*wait.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all
+       lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp.
+       lll_robust_*.  Renamed all __lll_mutex_* resp. __lll_robust_mutex_*
+       inline functions to __lll_* resp. __lll_robust_*.
+       (LLL_MUTEX_LOCK_INITIALIZER): Remove.
+       (lll_mutex_dead): Add private argument.
+       (__lll_lock_wait_private): New prototype.
+       (__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait,
+       __lll_robust_lock_timedwait): Add private argument to prototypes.
+       (__lll_lock): Add private argument, if it is constant LLL_PRIVATE,
+       call __lll_lock_wait_private, otherwise pass private to
+       __lll_lock_wait.
+       (__lll_robust_lock, __lll_cond_lock, __lll_timedlock,
+       __lll_robust_timedlock): Add private argument, pass it to
+       __lll_*wait functions.
+       (__lll_unlock): Add private argument, if it is constant LLL_PRIVATE,
+       call __lll_unlock_wake_private, otherwise pass private to
+       __lll_unlock_wake.
+       (__lll_robust_unlock): Add private argument, pass it to
+       __lll_robust_unlock_wake.
+       (lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock,
+       lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private
+       argument, pass it through to __lll_* inline function.
+       (__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove.
+       (lll_lock_t): Remove.
+       (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
+       __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
+       lll_cond_wake, lll_cond_broadcast): Remove.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including
+       the header from assembler.  Renamed all lll_mutex_* resp.
+       lll_robust_mutex_* macros to lll_* resp. lll_robust_*.
+       (LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED,
+       LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove.
+       (__lll_mutex_lock_wait, __lll_mutex_timedlock_wait,
+       __lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake):
+       Remove prototype.
+       (__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define.
+       (lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER*
+       rather than LLL_MUTEX_LOCK_INITIALIZER* macros.
+       (lll_trylock): Likewise, use __lll_trylock_asm, pass
+       MULTIPLE_THREADS_OFFSET as another asm operand.
+       (lll_lock): Add private argument, use __lll_lock_asm_start, pass
+       MULTIPLE_THREADS_OFFSET as last asm operand, call
+       __lll_lock_wait_private if private is constant LLL_PRIVATE,
+       otherwise pass private as another argument to __lll_lock_wait.
+       (lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
+       lll_timedlock, lll_robust_timedlock): Add private argument, pass
+       private as another argument to __lll_*lock_wait call.
+       (lll_unlock): Add private argument, use __lll_unlock_asm, pass
+       MULTIPLE_THREADS_OFFSET as another asm operand, call
+       __lll_unlock_wake_private if private is constant LLL_PRIVATE,
+       otherwise pass private as another argument to __lll_unlock_wake.
+       (lll_robust_unlock): Add private argument, pass private as another
+       argument to __lll_unlock_wake.
+       (lll_robust_dead): Add private argument, use __lll_private_flag
+       macro.
+       (lll_islocked): Use LLL_LOCK_INITIALIZER instead of
+       LLL_MUTEX_LOCK_INITIALIZER.
+       (lll_lock_t): Remove.
+       (LLL_LOCK_INITIALIZER_WAITERS): Define.
+       (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
+       __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
+       lll_cond_wake, lll_cond_broadcast): Remove.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert
+       2007-05-2{3,9} changes.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include
+       kernel-features.h and lowlevellock.h.
+       (LOAD_PRIVATE_FUTEX_WAIT): Define.
+       (LOAD_FUTEX_WAIT): Rewritten.
+       (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
+       define.
+       (__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
+       (__lll_mutex_lock_wait): Rename to ...
+       (__lll_lock_wait): ... this.  Take futex addr from %edx instead of
+       %ecx, %ecx is now private argument.  Don't compile in for libc.so.
+       (__lll_mutex_timedlock_wait): Rename to ...
+       (__lll_timedlock_wait): ... this.  Use __NR_gettimeofday.  %esi
+       contains private argument.  Don't compile in for libc.so.
+       (__lll_mutex_unlock_wake): Rename to ...
+       (__lll_unlock_wake): ... this.  %ecx contains private argument.
+       Don't compile in for libc.so.
+       (__lll_timedwait_tid): Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include
+       kernel-features.h and lowlevellock.h.
+       (LOAD_FUTEX_WAIT): Define.
+       (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
+       define.
+       (__lll_robust_mutex_lock_wait): Rename to ...
+       (__lll_robust_lock_wait): ... this.  Futex addr is now in %edx
+       argument, %ecx argument contains private.  Use LOAD_FUTEX_WAIT
+       macro.
+       (__lll_robust_mutex_timedlock_wait): Rename to ...
+       (__lll_robust_timedlock_wait): ... this.  Use __NR_gettimeofday.
+       %esi argument contains private, use LOAD_FUTEX_WAIT macro.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
+       PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to
+       __lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address
+       to __lll_lock_wait in %edx.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S:
+       Include lowlevellock.h and pthread-errnos.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
+       FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
+       (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass
+       cond_lock address in %edx rather than %ecx to __lll_lock_wait,
+       pass LLL_SHARED in %ecx to both __lll_lock_wait and
+       __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S:
+       Include lowlevellock.h and pthread-errnos.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
+       (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass
+       cond_lock address in %edx rather than %ecx to __lll_lock_wait,
+       pass LLL_SHARED in %ecx to both __lll_lock_wait and
+       __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
+       Include lowlevellock.h.
+       (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
+       Don't define.
+       (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass
+       cond_lock address in %edx rather than %ecx to __lll_lock_wait,
+       pass LLL_SHARED in %ecx to both __lll_lock_wait and
+       __lll_unlock_wake.  Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_*
+       to __lll_*, pass cond_lock address in %edx rather than %ecx to
+       __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass
+       MUTEX(%ebx) address in %edx rather than %ecx to
+       __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.  Move return value from %ecx to %edx
+       register.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
+       Don't define.
+       (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
+       MUTEX(%ebp) address in %edx rather than %ecx to
+       __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.  Move return value from %ecx to %edx
+       register.  Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
+       Don't define.
+       (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
+       MUTEX(%ebp) address in %edx rather than %ecx to
+       __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.  Move return value from %ecx to %edx
+       register.  Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass
+       MUTEX(%edi) address in %edx rather than %ecx to
+       __lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
+       MUTEX(%ebx) address in %edx rather than %ecx to
+       __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.  Move return value from %ecx to %edx
+       register.
+       * sysdeps/unix/sysv/linux/i386/pthread_once.S: Include
+       lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
+       define.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAKE): Don't define.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include
+       lowlevellock.h.
+       (LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define.
+       (sem_timedwait): Use __NR_gettimeofday.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include
+       lowlevellock.h.
+       (LOCK): Don't define.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include
+       lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAIT): Don't define.
+       * sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there
+       are waiters.
+       * sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert
+       2007-05-2{3,9} changes.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include
+       kernel-features.h and lowlevellock.h.
+       (LOAD_PRIVATE_FUTEX_WAIT): Define.
+       (LOAD_FUTEX_WAIT): Rewritten.
+       (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
+       (__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
+       (__lll_mutex_lock_wait): Rename to ...
+       (__lll_lock_wait): ... this.  %esi is now private argument.
+       Don't compile in for libc.so.
+       (__lll_mutex_timedlock_wait): Rename to ...
+       (__lll_timedlock_wait): ... this.  %esi contains private argument.
+       Don't compile in for libc.so.
+       (__lll_mutex_unlock_wake): Rename to ...
+       (__lll_unlock_wake): ... this.  %esi contains private argument.
+       Don't compile in for libc.so.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include
+       kernel-features.h and lowlevellock.h.
+       (LOAD_FUTEX_WAIT): Define.
+       (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
+       (__lll_robust_mutex_lock_wait): Rename to ...
+       (__lll_robust_lock_wait): ... this.  %esi argument contains private.
+       Use LOAD_FUTEX_WAIT macro.
+       (__lll_robust_mutex_timedlock_wait): Rename to ...
+       (__lll_robust_timedlock_wait): ... this. %esi argument contains
+       private, use LOAD_FUTEX_WAIT macro.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include
+       lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
+       PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to
+       __lll_lock_wait and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S:
+       Include lowlevellock.h and pthread-errnos.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
+       FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
+       (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*,
+       pass LLL_SHARED in %esi to both __lll_lock_wait and
+       __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S:
+       Include lowlevellock.h and pthread-errnos.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
+       (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*,
+       pass LLL_SHARED in %esi to both __lll_lock_wait and
+       __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*,
+       pass LLL_SHARED in %esi to both __lll_lock_wait and
+       __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+       (__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_*
+       to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+       Don't define.
+       (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*,
+       pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+       Don't define.
+       (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
+       pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+       Don't define.
+       (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
+       pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+       Don't define.
+       (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*,
+       pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S:
+       Include lowlevellock.h.
+       (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+       Don't define.
+       (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
+       pass PSHARED(%rdi) in %ecx to both __lll_lock_wait
+       and __lll_unlock_wake.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include
+       lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
+       define.
+       * sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAKE): Don't define.
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include
+       lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAIT): Don't define.
+       * sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include
+       lowlevellock.h.
+       (LOCK): Don't define.
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include
+       lowlevellock.h.
+       (LOCK, SYS_futex, FUTEX_WAIT): Don't define.
+       * sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file.
+       * sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
+       (__lll_lock_wait_private): New function.
+       (__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass
+       it to lll_futex_*wait.  Don't compile in for libc.so.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c:
+       Remove.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
+       (struct sparc_pthread_barrier): Remove.
+       (pthread_barrier_wait): Use union sparc_pthread_barrier instead of
+       struct sparc_pthread_barrier.  Pass
+       ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE to lll_{,un}lock
+       and lll_futex_wait macros.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c:
+       Remove.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c:
+       Include sparc pthread_barrier_wait.c instead of generic one.
+
+2007-07-30  Jakub Jelinek  <jakub@redhat.com>
+
+       * tst-rwlock14.c (do_test): Avoid warnings on 32-bit arches.
+
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
+       (pthread_rwlock_timedrdlock): Copy futex retval to %esi rather than
+       %ecx.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
+       (pthread_rwlock_timedwrlock): Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
+       (__pthread_rwlock_unlock): Fix MUTEX != 0 args to __lll_*.
+
+2007-07-31  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/sparc/tls.h (tcbhead_t): Add private_futex field.
+
+2007-07-26  Jakub Jelinek  <jakub@redhat.com>
+
+       * tst-locale2.c (useless): Add return statement.
+
+2007-07-24  Jakub Jelinek  <jakub@redhat.com>
+
+       * allocatestack.c (__nptl_setxid, __wait_lookup_done): Replace
+       lll_private_futex_* (*) with lll_futex_* (*, LLL_PRIVATE).
+       * pthread_create.c (start_thread): Likewise.
+       * init.c (sighandler_setxid): Likewise.
+       * sysdeps/alpha/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/ia64/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/i386/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/s390/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/powerpc/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/x86_64/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/sparc/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/sh/tls.h (THREAD_GSCOPE_RESET_FLAG): Likewise.
+       * sysdeps/pthread/aio_misc.h (AIO_MISC_NOTIFY, AIO_MISC_WAIT):
+       Likewise.
+       * sysdeps/pthread/gai_misc.h (GAI_MISC_NOTIFY, GAI_MISC_WAIT):
+       Likewise.
+       * sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork):
+       Likewise.
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h (__rtld_waitzero,
+       __rtld_notify): Likewise.
+       * sysdeps/unix/sysv/linux/fork.c (__libc_fork): Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/pthread_once.c (clear_once_control,
+       __pthread_once): Likewise.
+       * sysdeps/unix/sysv/linux/alpha/pthread_once.c (clear_once_control,
+       __pthread_once): Add LLL_PRIVATE as last argument to lll_futex_*.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (FUTEX_PRIVATE_FLAG,
+       LLL_PRIVATE, LLL_SHARED, __lll_private_flag): Define.
+       (lll_futex_wait): Add private argument, define as wrapper around
+       lll_futex_timed_wait.
+       (lll_futex_timed_wait, lll_futex_wake): Add private argument,
+       use __lll_private_flag macro.
+       (lll_robust_mutex_dead, __lll_mutex_unlock, __lll_robust_mutex_unlock,
+       __lll_mutex_unlock_force): Pass LLL_SHARED as last arg to lll_futex_*.
+       * sysdeps/unix/sysv/linux/ia64/pthread_once.c (clear_once_control,
+       __pthread_once): Add LLL_PRIVATE as last argument to lll_futex_*.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h (FUTEX_PRIVATE_FLAG,
+       LLL_PRIVATE, LLL_SHARED, __lll_private_flag): Define.
+       (lll_futex_wait): Add private argument, define as wrapper around
+       lll_futex_timed_wait.
+       (lll_futex_timed_wait, lll_futex_wake): Add private argument,
+       use __lll_private_flag macro.
+       (__lll_mutex_unlock, __lll_robust_mutex_unlock, lll_wait_tid,
+       __lll_mutex_unlock_force): Pass LLL_SHARED as last arg to lll_futex_*.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h (__lll_private_flag):
+       Define.
+       (lll_futex_timed_wait, lll_futex_wake): Use it.
+       (lll_private_futex_wait, lll_private_futex_timed_wait,
+       lll_private_futex_wake): Removed.
+       * sysdeps/unix/sysv/linux/s390/pthread_once.c (clear_once_control,
+       __pthread_once): Add LLL_PRIVATE as last argument to lll_futex_*.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h (FUTEX_PRIVATE_FLAG,
+       LLL_PRIVATE, LLL_SHARED, __lll_private_flag): Define.
+       (lll_futex_wait): Add private argument, define as wrapper around
+       lll_futex_timed_wait.
+       (lll_futex_timed_wait, lll_futex_wake): Add private argument,
+       use __lll_private_flag macro.
+       (lll_robust_mutex_dead, __lll_mutex_unlock, __lll_robust_mutex_unlock,
+       lll_wait_tid, __lll_mutex_unlock_force): Pass LLL_SHARED as last arg
+       to lll_futex_*.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+       (lll_private_futex_wait, lll_private_futex_timed_wait,
+       lll_private_futex_wake): Removed.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (__lll_private_flag):
+       Fix !__ASSUME_PRIVATE_FUTEX non-constant private case.
+       (lll_private_futex_wait, lll_private_futex_timed_wait,
+       lll_private_futex_wake): Removed.
+       * sysdeps/unix/sysv/linux/sparc/pthread_once.c (clear_once_control,
+       __pthread_once): Add LLL_PRIVATE as last argument to lll_futex_*.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (FUTEX_PRIVATE_FLAG,
+       LLL_PRIVATE, LLL_SHARED, __lll_private_flag): Define.
+       (lll_futex_wait): Add private argument, define as wrapper around
+       lll_futex_timed_wait.
+       (lll_futex_timed_wait, lll_futex_wake): Add private argument,
+       use __lll_private_flag macro.
+       (lll_robust_mutex_dead, __lll_mutex_unlock, __lll_robust_mutex_unlock,
+       lll_wait_tid, __lll_mutex_unlock_force): Pass LLL_SHARED as last arg
+       to lll_futex_*.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h (__lll_private_flag):
+       Define.
+       (lll_futex_timed_wait, lll_futex_wake): Use it.
+       (lll_private_futex_wait, lll_private_futex_timed_wait,
+       lll_private_futex_wake): Removed.
+
+2007-07-27  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/sparc/tls.h (tcbhead_t): Move gscope_flag to the end
+       of the structure for sparc32.
+
+2007-07-26  Aurelien Jarno  <aurelien@aurel32.net>
+
+       * sysdeps/sparc/tls.h (tcbhead_t): Add gscope_flag.
+
+2007-07-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S: Fix
+       code used when private futexes are assumed.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+       Likewise.
+
+2007-07-23  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+       (__lll_private_flag): Define.
+       (lll_futex_wait): Define as a wrapper around lll_futex_timed_wait.
+       (lll_futex_timed_wait, lll_futex_wake, lll_futex_wake_unlock): Use
+       __lll_private_flag.
+       (lll_private_futex_wait, lll_private_futex_timedwait,
+       lll_private_futex_wake): Define as wrapper around non-_private
+       macros.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+       (__lll_private_flag): Define.
+       (lll_futex_timed_wait, lll_futex_wake): Use __lll_private_flag.
+       (lll_private_futex_wait, lll_private_futex_timedwait,
+       lll_private_futex_wake): Define as wrapper around non-_private
+       macros.
+
+2007-07-10  Steven Munroe  <sjmunroe@us.ibm.com>
+
+       * pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Add LLL_SHARED
+       parameter to lll_futex_wait call.
+       * pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise.
+
+       * sysdeps/unix/sysv/linux/powerpc/pthread_once.c (__pthread_once):
+       Replace lll_futex_wait with lll_private_futex_wait.
+       * sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
+       Add LLL_SHARED parameter to lll_futex_wake().
+
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Define LLL_PRIVATE
+       LLL_SHARED, lll_private_futex_wait, lll_private_futex_timed_wait and
+       lll_private_futex_wake.
+       (lll_futex_wait): Add private parameter. Adjust FUTEX_PRIVATE_FLAG
+       bit from private parm before syscall.
+       (lll_futex_timed_wait): Likewise.
+       (lll_futex_wake): Likewise.
+       (lll_futex_wake_unlock): Likewise.
+       (lll_mutex_unlock): Add LLL_SHARED parm to lll_futex_wake call.
+       (lll_robust_mutex_unlock): Likewise.
+       (lll_mutex_unlock_force): Likewise.
+       (lll_wait_tid): Add LLL_SHARED parm to lll_futex_wait call.
+
+2007-07-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S: Fix
+       compilation when unconditionally using private futexes.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Likewise.
+
+2007-07-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/pthread/bits/stdio-lock.h (_IO_acquire_lock_clear_flags2):
+       Define.
+
+2007-07-06  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/sh/tls.h: Include stdlib.h, list.h, sysdep.h and
+       kernel-features.h.
+
+2007-05-16  Roland McGrath  <roland@redhat.com>
+
+       * init.c (__nptl_initial_report_events): New variable.
+       (__pthread_initialize_minimal_internal): Initialize pd->report_events
+       to that.
+
+2007-06-22  Jakub Jelinek  <jakub@redhat.com>
+
+       * pthread_getattr_np.c (pthread_getattr_np): Clear cpuset and
+       cpusetsize if pthread_getaffinity_np failed with ENOSYS.
+
+2007-06-19  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h: Remove mrlock
+       implementation.
+
+2007-06-18  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthreadP.h: Define PTHREAD_MUTEX_TYPE.
+       * pthread_mutex_lock.c: Use PTHREAD_MUTEX_TYPE.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+
+2007-06-17  Andreas Schwab  <schwab@suse.de>
+
+       * sysdeps/pthread/pt-initfini.c: Tell gcc about the nonstandard
+       sections.
+
+2007-06-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (allocate_stack): Make code compile if
+       __ASSUME_PRIVATE_FUTEX is set.
+
+2007-06-17  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S:
+       (__pthread_rwlock_rdlock): Don't use non SH-3/4 instruction.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S:
+       (__pthread_rwlock_wrlock): Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S:
+       (pthread_rwlock_timedrdlock): Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S:
+       (pthread_rwlock_timedwrlock): Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S:
+       (__pthread_rwlock_unlock): Likewise.
+
+2007-06-10  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/sh/tcb-offsets.sym: Add PRIVATE_FUTEX.
+       * sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h: Include endian.h.
+       Split __flags into __flags, __shared, __pad1 and __pad2.
+       * sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S: Use private
+       futexes if they are available.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S: Adjust so that change
+       in libc-lowlevellock.S allow using private futexes.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Define
+       FUTEX_PRIVATE_FLAG.  Add additional parameter to lll_futex_wait,
+       lll_futex_timed_wait and lll_futex_wake.  Change lll_futex_wait
+       to call lll_futex_timed_wait.  Add lll_private_futex_wait,
+       lll_private_futex_timed_wait and lll_private_futex_wake.
+       (lll_robust_mutex_unlock): Fix typo.
+       * sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S: Use private
+       field in futex command setup.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Use
+       COND_NWAITERS_SHIFT instead of COND_CLOCK_BITS.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_once.S: Use private futexes
+       if they are available.  Remove clear_once_control.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S: Use private
+       futexes if they are available.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/sem_post.S: Add private futex support.
+       Wake only when there are waiters.
+       * sysdeps/unix/sysv/linux/sh/sem_wait.S: Add private futex
+       support.  Indicate that there are waiters.  Remove unnecessary
+       extra cancellation test.
+       * sysdeps/unix/sysv/linux/sh/sem_timedwait.S: Likewise.  Removed
+       left-over duplication of __sem_wait_cleanup.
+
+2007-06-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Add additional
+       parameter to lll_futex_wait, lll_futex_timed_wait, and
+       lll_futex_wake.  Change lll_futex_wait to call lll_futex_timed_wait.
+       Add lll_private_futex_wait, lll_private_futex_timed_wait, and
+       lll_private_futex_wake.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
+       * allocatestack.c: Adjust use of lll_futex_* macros.
+       * init.c: Likewise.
+       * lowlevellock.h: Likewise.
+       * pthread_barrier_wait.c: Likewise.
+       * pthread_cond_broadcast.c: Likewise.
+       * pthread_cond_destroy.c: Likewise.
+       * pthread_cond_signal.c: Likewise.
+       * pthread_cond_timedwait.c: Likewise.
+       * pthread_cond_wait.c: Likewise.
+       * pthread_create.c: Likewise.
+       * pthread_mutex_lock.c: Likewise.
+       * pthread_mutex_setprioceiling.c: Likewise.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       * pthread_rwlock_timedrdlock.c: Likewise.
+       * pthread_rwlock_timedwrlock.c: Likewise.
+       * pthread_rwlock_unlock.c: Likewise.
+       * sysdeps/alpha/tls.h: Likewise.
+       * sysdeps/i386/tls.h: Likewise.
+       * sysdeps/ia64/tls.h: Likewise.
+       * sysdeps/powerpc/tls.h: Likewise.
+       * sysdeps/pthread/aio_misc.h: Likewise.
+       * sysdeps/pthread/gai_misc.h: Likewise.
+       * sysdeps/s390/tls.h: Likewise.
+       * sysdeps/sh/tls.h: Likewise.
+       * sysdeps/sparc/tls.h: Likewise.
+       * sysdeps/unix/sysv/linux/fork.c: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevellock.c: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.c: Likewise.
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h: Likewise.
+       * sysdeps/unix/sysv/linux/sem_post.c: Likewise.
+       * sysdeps/unix/sysv/linux/sem_timedwait.c: Likewise.
+       * sysdeps/unix/sysv/linux/sem_wait.c: Likewise.
+       * sysdeps/unix/sysv/linux/unregister-atfork.c: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/pthread_once.c: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c:
+       Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c: Likewise.
+       * sysdeps/x86_64/tls.h: Likewise.
+
+2007-05-29  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_getattr_np.c: No need to install a cancellation handler,
+       this is no cancellation point.
+       * pthread_getschedparam.c: Likewise.
+       * pthread_setschedparam.c: Likewise.
+       * pthread_setschedprio.c: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevellock.c: Remove all traces of
+       lll_unlock_wake_cb.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+
+       * sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Checking
+       whether there are more than one thread makes no sense here since
+       we only call the slow path if the locks are taken.
+       * sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Likewise.
+
+       * sysdeps/unix/sysv/linux/internaltypes.h: Introduce
+       COND_NWAITERS_SHIFT.
+       * pthread_cond_destroy.c: Use COND_NWAITERS_SHIFT instead of
+       COND_CLOCK_BITS.
+       * pthread_cond_init.c: Likewise.
+       * pthread_cond_timedwait.c: Likewise.
+       * pthread_cond_wait.c: Likewise.
+       * pthread_condattr_getclock.c: Likewise.
+       * pthread_condattr_setclock.c: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevelcond.sym: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+
+2007-05-28  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/pthread_attr_setstacksize.c: Include
+       unistd.h.
+
+       * sysdeps/i386/tls.h (THREAD_GSCOPE_RESET_FLAG): Use explicit
+       insn suffix.
+       (THREAD_GSCOPE_GET_FLAG): Remove.
+       * sysdeps/x86_64/tls.h (THREAD_GSCOPE_GET_FLAG): Remove.
+       * allocatestack.c (__wait_lookup_done): Revert 2007-05-24
+       changes.
+       * sysdeps/powerpc/tls.h (tcbhead_t): Remove gscope_flag.
+       (THREAD_GSCOPE_GET_FLAG): Remove.
+       (THREAD_GSCOPE_RESET_FLAG): Use THREAD_SELF->header.gscope_flag
+       instead of THREAD_GSCOPE_GET_FLAG.
+       (THREAD_GSCOPE_SET_FLAG): Likewise.  Add atomic_write_barrier after
+       it.
+       * sysdeps/s390/tls.h (THREAD_GSCOPE_FLAG_UNUSED,
+       THREAD_GSCOPE_FLAG_USED, THREAD_GSCOPE_FLAG_WAIT,
+       THREAD_GSCOPE_RESET_FLAG, THREAD_GSCOPE_SET_FLAG,
+       THREAD_GSCOPE_WAIT): Define.
+       * sysdeps/sparc/tls.h (THREAD_GSCOPE_FLAG_UNUSED,
+       THREAD_GSCOPE_FLAG_USED, THREAD_GSCOPE_FLAG_WAIT,
+       THREAD_GSCOPE_RESET_FLAG, THREAD_GSCOPE_SET_FLAG,
+       THREAD_GSCOPE_WAIT): Define.
+       * sysdeps/sh/tls.h (THREAD_GSCOPE_FLAG_UNUSED,
+       THREAD_GSCOPE_FLAG_USED, THREAD_GSCOPE_FLAG_WAIT,
+       THREAD_GSCOPE_RESET_FLAG, THREAD_GSCOPE_SET_FLAG,
+       THREAD_GSCOPE_WAIT): Define.
+       * sysdeps/ia64/tls.h (THREAD_GSCOPE_FLAG_UNUSED,
+       THREAD_GSCOPE_FLAG_USED, THREAD_GSCOPE_FLAG_WAIT,
+       THREAD_GSCOPE_RESET_FLAG, THREAD_GSCOPE_SET_FLAG,
+       THREAD_GSCOPE_WAIT): Define.
+
+2007-05-24  Richard Henderson  <rth@redhat.com>
+
+       * descr.h (struct pthread): Add header.gscope_flag.
+       * sysdeps/alpha/tls.h (THREAD_GSCOPE_FLAG_UNUSED,
+       THREAD_GSCOPE_FLAG_USED, THREAD_GSCOPE_FLAG_WAIT,
+       THREAD_GSCOPE_RESET_FLAG, THREAD_GSCOPE_SET_FLAG,
+       THREAD_GSCOPE_WAIT): Define.
+
+2007-05-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * init.c: Make it compile with older kernel headers.
+
+       * tst-initializers1.c: Show through exit code which test failed.
+
+       * pthread_rwlock_init.c: Also initialize __shared field.
+       * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Split __flags
+       element in rwlock structure into four byte elements.  One of them is
+       the new __shared element.
+       * sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h [__WORDSIZE=32]:
+       Likewise.
+       [__WORDSIZE=64]: Renamed __pad1 element in rwlock structure to
+       __shared, adjust names of other padding elements.
+       * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h: Likewise.
+       * sysdeps/pthread/pthread.h: Adjust rwlock initializers.
+       * sysdeps/unix/sysv/linux/lowlevelrwlock.sym: Add PSHARED.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Define
+       FUTEX_PRIVATE_FLAG.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Change main
+       futex to use private operations if possible.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Likewise.
+
+2007-05-26  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthreadP.h (PTHREAD_RWLOCK_PREFER_READER_P): Define.
+       * pthread_rwlock_rdlock.c: Use PTHREAD_RWLOCK_PREFER_READER_P.
+       * pthread_rwlock_timedrdlock.c: Likewise.
+       * pthread_rwlock_tryrdlock.c: Likewise.
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_trywait.S (sem_trywait): Tiny
+       optimization.
+
+       * sysdeps/unix/sysv/linux/sem_wait.c: Add missing break.
+       * sysdeps/unix/sysv/linux/sem_timedwait.c: Removed left-over
+       duplication of __sem_wait_cleanup.
+
+       * allocatestack.c: Revert last change.
+       * init.c: Likewise.
+       * sysdeps/i386/tls.h: Likewise.
+       * sysdeps/x86_64/tls.h: Likewise.
+       * descr.h [TLS_DTV_AT_TP] (struct pthread): Add private_futex field to
+       header structure.
+       * sysdeps/powerpc/tcb-offsets.sym: Add PRIVATE_FUTEX_OFFSET.
+
+       * sysdeps/unix/sysv/linux/internaltypes.h (struct pthread_barrier):
+       Add private field.
+       * sysdeps/unix/sysv/linux/lowlevelbarrier.sym: Add PRIVATE definition.
+       * pthread_barrier_init.c: Set private flag if pshared and private
+       futexes are supported.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Use
+       private field in futex command setup.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Likewise.
+
+2007-05-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Add private futex
+       support.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_post.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Likewise.
+
+       * semaphoreP.h: Declare __old_sem_init and __old_sem_wait.
+       * sem_init.c (__new_sem_init): Rewrite to initialize all three
+       fields in the structure.
+       (__old_sem_init): New function.
+       * sem_open.c: Initialize all fields of the structure.
+       * sem_getvalue.c: Adjust for renamed element.
+       * sysdeps/unix/sysv/linux/Makefile [subdir=nptl]
+       (gen-as-const-headers): Add structsem.sym.
+       * sysdeps/unix/sysv/linux/structsem.sym: New file.
+       * sysdeps/unix/sysv/linux/internaltypes.h: Rename struct sem to
+       struct new_sem.  Add struct old_sem.
+       * sysdeps/unix/sysv/linux/sem_post.c: Wake only when there are waiters.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_post.S: Likewise.
+       * sysdeps/unix/sysv/linux/sem_wait.c: Indicate that there are waiters.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/sem_timedwait.c: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Likewise.
+       * Makefile (tests): Add tst-sem10, tst-sem11, tst-sem12.
+       * tst-sem10.c: New file.
+       * tst-sem11.c: New file.
+       * tst-sem12.c: New file.
+       * tst-typesizes.c: Test struct new_sem and struct old_sem instead
+       of struct sem.
+
+2007-05-25  Ulrich Drepper  <drepper@redhat.com>
+           Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S (sem_timedwait):
+       Move __pthread_enable_asynccancel right before futex syscall.
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S (sem_timedwait):
+       Likewise.
+
+2007-05-24  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/i386/tls.h (THREAD_SET_PRIVATE_FUTEX,
+       THREAD_COPY_PRIVATE_FUTEX): Define.
+       * sysdeps/x86_64/tls.h (THREAD_SET_PRIVATE_FUTEX,
+       THREAD_COPY_PRIVATE_FUTEX): Define.
+       * allocatestack.c (allocate_stack): Use THREAD_COPY_PRIVATE_FUTEX.
+       * init.c (__pthread_initialize_minimal_internal): Use
+       THREAD_SET_PRIVATE_FUTEX.
+
+       * sysdeps/powerpc/tls.h (tcbhead_t): Add gscope_flag.
+       (THREAD_GSCOPE_FLAG_UNUSED, THREAD_GSCOPE_FLAG_USED,
+       THREAD_GSCOPE_FLAG_WAIT): Define.
+       (THREAD_GSCOPE_GET_FLAG, THREAD_GSCOPE_SET_FLAG,
+       THREAD_GSCOPE_RESET_FLAG, THREAD_GSCOPE_WAIT): Define.
+       * sysdeps/i386/tls.h (THREAD_GSCOPE_WAIT): Don't use
+       PTR_DEMANGLE.
+       (THREAD_GSCOPE_GET_FLAG): Define.
+       * sysdeps/x86_64/tls.h (THREAD_GSCOPE_GET_FLAG): Define.
+       * allocatestack.c (__wait_lookup_done): Use THREAD_GSCOPE_GET_FLAG
+       instead of ->header.gscope_flag directly.
+
+2007-05-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * init.c (__pthread_initialize_minimal_internal): Check whether
+       private futexes are available.
+       * allocatestack.c (allocate_stack): Copy private_futex field from
+       current thread into the new stack.
+       * sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Use private
+       futexes if they are available.
+       * sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Adjust so that change
+       in libc-lowlevellock.S allow using private futexes.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Define
+       FUTEX_PRIVATE_FLAG.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Use private futexes
+       if they are available.
+       * sysdeps/unix/sysv/linux/i386/pthread_once.S: Likewise.
+       * sysdeps/x86_64/tcb-offsets.sym: Add PRIVATE_FUTEX.
+       * sysdeps/i386/tcb-offsets.sym: Likewise.
+       * sysdeps/x86_64/tls.h (tcbhead_t): Add private_futex field.
+       * sysdeps/i386/tls.h (tcbhead_t): Likewise.
+
+2007-05-21  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread-functions.h (struct pthread_functions):
+       Remove ptr_wait_lookup_done again.
+       * init.c (pthread_functions): Don't add .ptr_wait_lookup_done here.
+       (__pthread_initialize_minimal_internal): Initialize
+       _dl_wait_lookup_done pointer in _rtld_global directly.
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c (__libc_pthread_init):
+       Remove code to set _dl_wait_lookup_done.
+       * sysdeps/x86_64/tls.h (THREAD_GSCOPE_WAIT): The pointer is not
+       encrypted for now.
+
+2007-05-21  Jakub Jelinek  <jakub@redhat.com>
+
+       * tst-robust9.c (do_test): Don't fail if ENABLE_PI and
+       pthread_mutex_init failed with ENOTSUP.
+
+2007-05-19  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (__wait_lookup_done): New function.
+       * sysdeps/pthread/pthread-functions.h (struct pthread_functions):
+       Add ptr_wait_lookup_done.
+       * init.c (pthread_functions): Initialize .ptr_wait_lookup_done.
+       * pthreadP.h: Declare __wait_lookup_done.
+       * sysdeps/i386/tls.h (tcbhead_t): Add gscope_flag.
+       Define macros to implement reference handling of global scope.
+       * sysdeps/x86_64/tls.h: Likewise.
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c (__libc_pthread_init):
+       Initialize GL(dl_wait_lookup_done).
+
+2007-05-17  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #4512]
+       * pthread_mutex_lock.c: Preserve FUTEX_WAITERS bit when dead owner
+       is detected.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       Patch in part by Atsushi Nemoto <anemo@mba.ocn.ne.jp>.
+
+       * Makefile (tests): Add tst-robust9 and tst-robustpi9.
+       * tst-robust9.c: New file.
+       * tst-robustpi9.c: New file.
+
+       * sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Remove
+       unnecessary extra cancellation test.
+
+2007-05-14  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Remove unnecessary
+       extra cancellation test.
+       * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Likewise.
+
+2007-05-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * descr.h (struct pthread): Rearrange members to fill hole in
+       64-bit layout.
+
+       * sysdeps/unix/sysv/linux/pthread_setaffinity.c
+       (__pthread_setaffinity_new): If syscall was successful and
+       RESET_VGETCPU_CACHE is defined, use it before returning.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c: New file.
+
+2007-05-10  Jakub Jelinek  <jakub@redhat.com>
+
+       [BZ #4455]
+       * tst-align2.c: Include stackinfo.h.
+       * tst-getpid1.c: Likewise.
+
+2007-05-02  Carlos O'Donell  <carlos@systemhalted.org>
+
+       [BZ #4455]
+       * tst-align2.c (do_test): Add _STACK_GROWS_UP case.
+       * tst-getpid1.c (do_test): Likewise.
+
+       [BZ #4456]
+       * allocatestack.c (change_stack_perm): Add _STACK_GROWS_UP case.
+       (allocate_stack): Likewise.
+
+2007-05-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+       (__lll_robust_lock_wait): Fix race caused by reloading of futex value.
+       (__lll_robust_timedlock_wait): Likewise.
+       Reported by Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>.
+
+2007-05-06  Mike Frysinger  <vapier@gentoo.org>
+
+       [BZ #4465]
+       * tst-cancel-wrappers.sh: Set C["fdatasync"] to 1.
+       * tst-cancel4.c (tf_fdatasync): New test.
+
+2007-04-27  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #4392]
+       * pthread_mutex_trylock.c (__pthread_mutex_trylock): Treat error
+       check mutexes like normal mutexes.
+
+       [BZ #4306]
+       * sysdeps/unix/sysv/linux/timer_create.c (timer_create):
+       Initialize the whole sigevent structure to appease valgrind.
+
+2007-04-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/x86_64/tls.h (tcbhead_t): Add vgetcpu_cache.
+       * sysdeps/x86_64/tcb-offsets.sym: Add VGETCPU_CACHE_OFFSET.
+
+2007-04-06  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-locale1.c: Avoid warnings.
+       * tst-locale2.c: Likewise.
+
+2007-03-19  Steven Munroe  <sjmunroe@us.ibm.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+       (__lll_robust_trylock): Add MUTEX_HINT_ACQ to lwarx instruction.
+
+2007-03-16  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/pthread/bits/libc-lock.h: Use __extern_inline and
+       __extern_always_inline where appropriate.
+       * sysdeps/pthread/pthread.h: Likewise.
+
+2007-03-13  Richard Henderson  <rth@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h (PSEUDO): Use two
+       separate cfi regions for the two subsections.
+
+2007-02-25  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/fork.c (__libc_fork): Reset refcntr in
+       new thread, don't just decrement it.
+       Patch by Suzuki K P <suzuki@in.ibm.com>.
+
+2007-02-21  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread-functions.h: Correct last patch, correct
+       PTHFCT_CALL definition.
+
+2007-02-18  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread-functions.h: If PTR_DEMANGLE is not
+       available, don't use it.
+
+2007-02-09  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+       (__lll_mutex_timedlock_wait): Use correct pointer when we don't
+       call into the kernel to delay.
+
+2007-01-18  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-initializers1.c: We want to test the initializers as seen
+       outside of libc, so undefine _LIBC.
+
+       * pthread_join.c (cleanup): Avoid warning.
+
+2007-01-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+       (__lll_timedwait_tid): Add unwind info.
+
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c: Don't just copy the
+       function table, mangle the pointers.
+       * sysdeps/pthread/pthread-functions.h: Define PTHFCT_CALL.
+       * forward.c: Use PTHFCT_CALL and __libc_pthread_functions_init.
+       * sysdeps/pthread/bits/libc-lock.h: When using __libc_pthread_functions
+       demangle pointers before use.
+       * sysdeps/unix/sysv/linux/s390/jmp-unwind.c: Use PTHFCT_CALL to
+       demangle pointer.
+       * sysdeps/unix/sysv/linux/jmp-unwind.c: Likewise.
+       * sysdeps/pthread/setxid.h: Likewise.
+
+2007-01-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-rwlock7.c: Show some more information in case of correct
+       behavior.
+
+2007-01-11  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+       (lll_futex_timed_wait): Undo part of last change, don't negate
+       return value.
+
+2007-01-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Cleanups.  Define
+       FUTEX_CMP_REQUEUE and lll_futex_requeue.
+
+2006-12-28  David S. Miller  <davem@davemloft.net>
+
+       * shlib-versions: Fix sparc64 linux target specification.
+
+2007-01-10  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c:
+       Adjust include path for pthread_barrier_wait.c move.
+
+2006-12-21  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/pthread_kill.c (pthread_kill): Make sure
+       tid isn't reread from pd->tid in between ESRCH test and the syscall.
+
+2006-12-06  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/s390/s390-32/sysdep-cancel.h (PSEUDO): Handle
+       6 argument cancellable syscalls.
+       (STM_6, LM_6, LR7_0, LR7_1, LR7_2, LR7_3, LR7_4, LR7_5, LR7_6): Define.
+       * sysdeps/unix/sysv/linux/s390/s390-64/sysdep-cancel.h (PSEUDO): Handle
+       6 argument cancellable syscalls.
+       (STM_6, LM_6, LR7_0, LR7_1, LR7_2, LR7_3, LR7_4, LR7_5, LR7_6): Define.
+
+2006-12-09  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h
+       (__rtld_mrlock_initialize): Add missing closing parenthesis.
+
+2006-10-30  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/ia64/pthread_spin_unlock.c (pthread_spin_unlock): Use
+       __sync_lock_release instead of __sync_lock_release_si.
+
+2006-10-29  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/sysdep-cancel.h (RTLD_SINGLE_THREAD_P):
+       Define.
+       (SINGLE_THREAD_P): Define to 1 if IS_IN_rtld.
+       * sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/s390-32/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/s390-64/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc64/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/sysdep-cancel.h: Likewise.
+
+2006-10-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread_barrier_wait.c: Move to...
+       * pthread_barrier_wait.c: ...here.
+       * sysdeps/pthread/pthread_cond_broadcast.c: Move to...
+       * pthread_cond_broadcast.c: ...here.
+       * sysdeps/pthread/pthread_cond_signal.c: Move to...
+       * pthread_cond_signal.c: ...here.
+       * sysdeps/pthread/pthread_cond_timedwait.c: Move to...
+       * pthread_cond_timedwait.c: ...here.
+       * sysdeps/pthread/pthread_cond_wait.c: Move to...
+       * pthread_cond_wait.c: ...here.
+       * sysdeps/pthread/pthread_once.c: Move to...
+       * pthread_once.c: ...here.
+       * sysdeps/pthread/pthread_rwlock_rdlock.c: Move to...
+       * pthread_rwlock_rdlock.c: ...here.
+       * sysdeps/pthread/pthread_rwlock_timedrdlock.c: Move to...
+       * pthread_rwlock_timedrdlock.c: ...here.
+       * sysdeps/pthread/pthread_rwlock_timedwrlock.c: Move to...
+       * pthread_rwlock_timedwrlock.c: ...here.
+       * sysdeps/pthread/pthread_rwlock_unlock.c: Move to...
+       * pthread_rwlock_unlock.c: ...here.
+       * sysdeps/pthread/pthread_rwlock_wrlock.c: Move to...
+       * pthread_rwlock_wrlock.c: ...here.
+       * sysdeps/pthread/pthread_spin_destroy.c: Move to...
+       * pthread_spin_destroy.c: ...here.
+       * sysdeps/pthread/pthread_spin_init.c: Move to...
+       * pthread_spin_init.c: ...here.
+       * sysdeps/pthread/pthread_spin_unlock.c: Move to...
+       * pthread_spin_unlock.c: ...here.
+       * sysdeps/pthread/pthread_getcpuclockid.c: Move to...
+       * pthread_getcpuclockid.c: ...here.
+
+       * init.c: USE_TLS support is now always enabled.
+       * tst-tls5.h: Likewise.
+       * sysdeps/alpha/tls.h: Likewise.
+       * sysdeps/i386/tls.h: Likewise.
+       * sysdeps/ia64/tls.h: Likewise.
+       * sysdeps/powerpc/tls.h: Likewise.
+       * sysdeps/s390/tls.h: Likewise.
+       * sysdeps/sh/tls.h: Likewise.
+       * sysdeps/sparc/tls.h: Likewise.
+       * sysdeps/x86_64/tls.h: Likewise.
+
+2006-10-27  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h (__rtld_mrlock_lock,
+       __rtld_mrlock_change): Update oldval if atomic compare and exchange
+       failed.
+
+       * sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h (SINGLE_THREAD_P):
+       Define to THREAD_SELF->header.multiple_threads.
+       * sysdeps/unix/sysv/linux/ia64/sysdep-cancel.h (SINGLE_THREAD_P):
+       Likewise.
+       * sysdeps/unix/sysv/linux/i386/sysdep-cancel.h (SINGLE_THREAD_P):
+       Likewise.
+       * sysdeps/unix/sysv/linux/s390/s390-32/sysdep-cancel.h
+       (SINGLE_THREAD_P): Likewise.
+       * sysdeps/unix/sysv/linux/s390/s390-64/sysdep-cancel.h
+       (SINGLE_THREAD_P): Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h
+       (SINGLE_THREAD_P): Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h
+       (SINGLE_THREAD_P): Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h (SINGLE_THREAD_P):
+       Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h
+       (SINGLE_THREAD_P): Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc64/sysdep-cancel.h
+       (SINGLE_THREAD_P): Likewise.
+       * sysdeps/unix/sysv/linux/sh/sysdep-cancel.h (SINGLE_THREAD_P):
+       Likewise.
+
+2006-10-26  Jakub Jelinek  <jakub@redhat.com>
+
+       * pthread_attr_setstacksize.c (NEW_VERNUM): Define to GLIBC_2_3_3
+       by default rather than 2_3_3.
+
+2006-10-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h (__rtld_mrlock_lock,
+       __rtld_mrlock_unlock, __rtld_mrlock_change, __rtld_mrlock_done): Use
+       atomic_* instead of catomic_* macros.
+
+2006-10-12  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #3285]
+       * sysdeps/unix/sysv/linux/bits/local_lim.h: Add SEM_VALUE_MAX.
+       * sysdeps/unix/sysv/linux/powerpc/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/alpha/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/bits/local_lim.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/bits/semaphore.h: Remove SEM_VALUE_MAX.
+       * sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/bits/semaphore.h: Likewise.
+       * sysdeps/unix/sysv/linux/alpha/bits/semaphore.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/bits/semaphore.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/bits/semaphore.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/bits/semaphore.h: Likewise.
+
+2006-10-11  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/sysdep-cancel.h: Add support for
+       cancelable syscalls with six parameters.
+
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h: Use catomic_*
+       operations instead of atomic_*.
+
+2006-10-09  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/rtld-lowlevel.h: New file.
+
+2006-10-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/bits/local_lim.h: New file.
+       * sysdeps/unix/sysv/linux/powerpc/pthread_attr_setstack.c: New file.
+       * sysdeps/unix/sysv/linux/powerpc/pthread_attr_setstacksize.c:
+       New file.
+       * pthread_attr_setstack.c: Allow overwriting the version number of the
+       new symbol.
+       * pthread_attr_setstacksize.c: Likewise.
+       (__old_pthread_attr_setstacksize): If STACKSIZE_ADJUST is defined use
+       it.
+       * sysdeps/unix/sysv/linux/powerpc/Versions (libpthread): Add
+       pthread_attr_setstack and pthread_attr_setstacksize to GLIBC_2.6.
+
+2006-09-24  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #3251]
+       * descr.h (ENQUEUE_MUTEX_BOTH): Add cast to avoid warning.
+       Patch by Petr Baudis.
+
+2006-09-18  Jakub Jelinek  <jakub@redhat.com>
+
+       * tst-kill4.c (do_test): Explicitly set tf thread's stack size.
+
+       * tst-cancel2.c (tf): Loop as long as something was written.
+
+2006-09-12  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S: For PI
+       mutexes wake all mutexes.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Don't increment
+       WAKEUP_SEQ if this would increase the value beyond TOTAL_SEQ.
+       * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Likewise.
+
+2006-09-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-cond22.c (tf): Slight changes to the pthread_cond_wait use
+       to guarantee the thread is always canceled.
+
+2006-09-08  Jakub Jelinek  <jakub@redhat.com>
+
+       * tst-cond22.c: Include pthread.h instead of pthreadP.h.
+       Include stdlib.h.
+       * sysdeps/pthread/pthread_cond_wait.c (__condvar_cleanup): Only
+       increase FUTEX if increasing WAKEUP_SEQ.  Fix comment typo.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+
+2006-09-08  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #3123]
+       * sysdeps/pthread/pthread_cond_wait.c (__condvar_cleanup): Don't
+       increment WAKEUP_SEQ if this would increase the value beyond TOTAL_SEQ.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+       * Makefile (tests): Add tst-cond22.
+       * tst-cond22.c: New file.
+
+2006-09-05  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #3124]
+       * descr.h (struct pthread): Add parent_cancelhandling.
+       * sysdeps/pthread/createthread.c (create_thread): Pass parent
+       cancelhandling value to child.
+       * pthread_create.c (start_thread): If parent thread was canceled
+       reset the SIGCANCEL mask.
+       * Makefile (tests): Add tst-cancel25.
+       * tst-cancel25.c: New file.
+
+2006-09-05  Jakub Jelinek  <jakub@redhat.com>
+           Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/gai_misc.h (GAI_MISC_NOTIFY): Don't decrement
+       counterp if it is already zero.
+       * sysdeps/pthread/aio_misc.h (AIO_MISC_NOTIFY): Likewise.
+
+2006-03-04  Jakub Jelinek  <jakub@redhat.com>
+           Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h
+       (LLL_STUB_UNWIND_INFO_START, LLL_STUB_UNWIND_INFO_END,
+       LLL_STUB_UNWIND_INFO_3, LLL_STUB_UNWIND_INFO_4): Define.
+       (lll_mutex_lock, lll_robust_mutex_lock, lll_mutex_cond_lock,
+       lll_robust_mutex_cond_lock, lll_mutex_timedlock,
+       lll_robust_mutex_timedlock, lll_mutex_unlock,
+       lll_robust_mutex_unlock, lll_lock, lll_unlock): Use them.
+       Add _L_*_ symbols around the subsection.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Add unwind info.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Likewise.
+
+2006-03-03  Jakub Jelinek  <jakub@redhat.com>
+           Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+       (LLL_STUB_UNWIND_INFO_START, LLL_STUB_UNWIND_INFO_END,
+       LLL_STUB_UNWIND_INFO_5, LLL_STUB_UNWIND_INFO_6): Define.
+       (lll_mutex_lock, lll_robust_mutex_lock, lll_mutex_cond_lock,
+       lll_robust_mutex_cond_lock, lll_mutex_timedlock,
+       lll_robust_mutex_timedlock, lll_mutex_unlock,
+       lll_robust_mutex_unlock, lll_lock, lll_unlock): Use them.
+       Add _L_*_ symbols around the subsection.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Add unwind info.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
+
+2006-08-31  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Undo last
+       change because it can disturb too much existing code.  If real hard
+       reader preference is needed we'll introduce another type.
+       * sysdeps/pthread/pthread_rwlock_timedwrlock.c
+       (pthread_rwlock_timedwrlock): Likewise.
+       * sysdeps/pthread/pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock):
+       Likewise.
+
+2006-08-30  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Respect
+       reader preference.
+       * sysdeps/pthread/pthread_rwlock_timedwrlock.c
+       (pthread_rwlock_timedwrlock): Likewise.
+       * sysdeps/pthread/pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock):
+       Likewise.
+
+2006-08-25  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c (freeres_libpthread):
+       Only define ifdef SHARED.
+
+2006-08-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (queue_stack): Move freeing of surplus stacks to...
+       (free_stacks): ...here.
+       (__free_stack_cache): New function.
+       * pthreadP.h: Declare __free_stack_cache.
+       * sysdeps/pthread/pthread-functions.h (pthread_functions): Add
+       ptr_freeres.
+       * init.c (pthread_functions): Initialize ptr_freeres.
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c (freeres_libpthread):
+       New freeres function.
+
+2006-07-30  Joseph S. Myers  <joseph@codesourcery.com>
+
+       [BZ #3018]
+       * Makefile (extra-objs): Add modules to extra-test-objs instead.
+
+2006-08-20  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
+       _XOPEN_REALTIME_THREADS.
+
+2006-08-15  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/clock_settime.c (INTERNAL_VSYSCALL): Use
+       HAVE_CLOCK_GETRES_VSYSCALL as guard macro rather than
+       HAVE_CLOCK_GETTIME_VSYSCALL.
+       (maybe_syscall_settime_cpu): Use plain INTERNAL_VSYSCALL here.
+
+2006-08-14  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h
+       (_POSIX_THREAD_PRIO_PROTECT): Define to 200112L.
+       * descr.h (struct priority_protection_data): New type.
+       (struct pthread): Add tpp field.
+       * pthreadP.h (PTHREAD_MUTEX_PP_NORMAL_NP,
+       PTHREAD_MUTEX_PP_RECURSIVE_NP, PTHREAD_MUTEX_PP_ERRORCHECK_NP,
+       PTHREAD_MUTEX_PP_ADAPTIVE_NP): New enum values.
+       * pthread_mutex_init.c (__pthread_mutex_init): Handle non-robust
+       TPP mutexes.
+       * pthread_mutex_lock.c (__pthread_mutex_lock): Handle TPP mutexes.
+       * pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
+       * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Likewise.
+       * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Likewise.
+       * tpp.c: New file.
+       * pthread_setschedparam.c (__pthread_setschedparam): Handle priority
+       boosted by TPP.
+       * pthread_setschedprio.c (pthread_setschedprio): Likewise.
+       * pthread_mutexattr_getprioceiling.c
+       (pthread_mutexattr_getprioceiling): If ceiling is 0, ensure it is
+       in the SCHED_FIFO priority range.
+       * pthread_mutexattr_setprioceiling.c
+       (pthread_mutexattr_setprioceiling): Fix prioceiling validation.
+       * pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling): Fail
+       if mutex is not TPP.  Ceiling is now in __data.__lock.
+       * pthread_mutex_setprioceiling.c: Include stdbool.h.
+       (pthread_mutex_setprioceiling): Fix prioceiling validation.  Ceiling
+       is now in __data.__lock.  Add locking.
+       * pthread_create.c (__free_tcb): Free pd->tpp structure.
+       * Makefile (libpthread-routines): Add tpp.
+       (xtests): Add tst-mutexpp1, tst-mutexpp6 and tst-mutexpp10.
+       * tst-tpp.h: New file.
+       * tst-mutexpp1.c: New file.
+       * tst-mutexpp6.c: New file.
+       * tst-mutexpp10.c: New file.
+       * tst-mutex1.c (TEST_FUNCTION): Don't redefine if already defined.
+       * tst-mutex6.c (TEST_FUNCTION): Likewise.
+
+2006-08-12  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #2843]
+       * pthread_join.c (pthread_join): Account for self being canceled
+       when checking for deadlocks.
+       * tst-join5.c: Cleanups.  Allow to be used in tst-join6.
+       (tf1): Don't print anything after pthread_join returns, this would be
+       another cancellation point.
+       (tf2): Likewise.
+       * tst-join6.c: New file.
+       * Makefile (tests): Add tst-join6.
+
+2006-08-03  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #2892]
+       * pthread_setspecific.c (__pthread_setspecific): Check
+       out-of-range index before checking for unused key.
+
+       * sysdeps/pthread/gai_misc.h: New file.
+
+2006-08-01  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/smp.h: New file.  Old Linux-specific
+       file.  Don't use sysctl.
+       * sysdeps/unix/sysv/linux/smp.h: Always assume SMP.  Archs can
+       overwrite the file if this is likely not true.
+
+2006-07-31  Daniel Jacobowitz  <dan@codesourcery.com>
+
+       * allocatestack.c (__reclaim_stacks): Reset the PID on cached stacks.
+       * Makefile (tests): Add tst-getpid3.
+       * tst-getpid3.c: New file.
+
+2006-07-30  Roland McGrath  <roland@redhat.com>
+
+       * Makefile (libpthread-routines): Add ptw-sigsuspend.
+
+       * sysdeps/unix/sysv/linux/i386/not-cancel.h
+       (pause_not_cancel): New macro.
+       (nanosleep_not_cancel): New macro.
+       (sigsuspend_not_cancel): New macro.
+       * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use
+       nanosleep_not_cancel macro from <not-cancel.h>.
+       * pthread_mutex_lock.c (__pthread_mutex_lock): Use pause_not_cancel
+       macro from <not-cancel.h>.
+
+2006-07-28  Ulrich Drepper  <drepper@redhat.com>
+           Jakub Jelinek  <jakub@redhat.com>
+
+       * descr.h: Change ENQUEUE_MUTEX and DEQUEUE_MUTEX for bit 0
+       notification of PI mutex.  Add ENQUEUE_MUTEX_PI.
+       * pthreadP.h: Define PTHREAD_MUTEX_PI_* macros for PI mutex types.
+       * pthread_mutex_setprioceiling.c: Adjust for mutex type name change.
+       * pthread_mutex_init.c: Add support for priority inheritance mutex.
+       * pthread_mutex_lock.c: Likewise.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       * sysdeps/pthread/pthread_cond_broadcast.c: For PI mutexes wake
+       all mutexes.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.c: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.c: Likewise.
+       * sysdeps/unix/sysv/linux/pthread-pi-defines.sym: New file.
+       * sysdeps/unix/sysv/linux/Makefile (gen-as-const-header): Add
+       pthread-pi-defines.sym.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Define FUTEX_LOCK_PI,
+       FUTEX_UNLOCK_PI, and FUTEX_TRYLOCK_PI.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
+       _POSIX_THREAD_PRIO_INHERIT to 200112L.
+       * tst-mutex1.c: Adjust to allow use in PI mutex test.
+       * tst-mutex2.c: Likewise.
+       * tst-mutex3.c: Likewise.
+       * tst-mutex4.c: Likewise.
+       * tst-mutex5.c: Likewise.
+       * tst-mutex6.c: Likewise.
+       * tst-mutex7.c: Likewise.
+       * tst-mutex7a.c: Likewise.
+       * tst-mutex8.c: Likewise.
+       * tst-mutex9.c: Likewise.
+       * tst-robust1.c: Likewise.
+       * tst-robust7.c: Likewise.
+       * tst-robust8.c: Likewise.
+       * tst-mutexpi1.c: New file.
+       * tst-mutexpi2.c: New file.
+       * tst-mutexpi3.c: New file.
+       * tst-mutexpi4.c: New file.
+       * tst-mutexpi5.c: New file.
+       * tst-mutexpi6.c: New file.
+       * tst-mutexpi7.c: New file.
+       * tst-mutexpi7a.c: New file.
+       * tst-mutexpi8.c: New file.
+       * tst-mutexpi9.c: New file.
+       * tst-robust1.c: New file.
+       * tst-robust2.c: New file.
+       * tst-robust3.c: New file.
+       * tst-robust4.c: New file.
+       * tst-robust5.c: New file.
+       * tst-robust6.c: New file.
+       * tst-robust7.c: New file.
+       * tst-robust8.c: New file.
+       * Makefile (tests): Add the new tests.
+
+       * pthread_create.c (start_thread): Add some casts to avoid warnings.
+       * pthread_mutex_destroy.c: Remove unneeded label.
+
+2006-07-01  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_mutex_init.c (__pthread_mutex_init): Move some
+       computations to compile time.
+
+2006-06-04  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h: Add pthread_equal inline version.
+
+2006-05-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/fork.h: Mark __fork_handlers as hidden.
+
+2006-05-11  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_key_create.c (__pthread_key_create): Do away with
+       __pthread_keys_lock.
+
+       * sysdeps/unix/sysv/linux/pthread_setaffinity.c
+       (__kernel_cpumask_size): Mark as hidden.
+       * sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c: Likewise.
+
+       * sem_open.c (__sem_mappings_lock): Mark as hidden.
+       * semaphoreP.h (__sem_mappings_lock): Likewise.
+
+2006-05-10  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_atfork.c: Mark __dso_handle as hidden.
+
+2006-05-09  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #2644]
+       * sysdeps/pthread/unwind-forcedunwind.c: Different solution for
+       the reload problem.  Change the one path in pthread_cancel_init
+       which causes the problem.  Force gcc to reload.  Simplify callers.
+       * sysdeps/unix/sysv/linux/ia64/unwind-forcedunwind.c
+       (_Unwind_GetBSP): Undo last patch.
+
+2006-05-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/ia64/unwind-forcedunwind.c: Make sure the
+       function pointer is reloaded after pthread_cancel_init calls.
+
+       [BZ #2644]
+       * sysdeps/pthread/unwind-forcedunwind.c: Make sure functions
+       pointers are reloaded after pthread_cancel_init calls.
+
+2006-05-01  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/allocalim.h (__libc_use_alloca): Mark with
+       __always_inline.
+
+2006-04-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/timer_routines.c (timer_helper_thread):
+       Allocate new object which is passed to timer_sigev_thread so that
+       the timer can be deleted before the new thread is scheduled.
+
+2006-04-26  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/x86_64/tls.h: Include <asm/prctl.h> inside [! __ASSEMBLER__].
+
+2006-04-08  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Remove branch prediction
+       suffix for conditional jumps.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Likewise.
+
+       * init.c (sigcancel_handler): Compare with correct PID even if the
+       thread is in the middle of a fork call.
+       (sighandler_setxid): Likewise.
+       Reported by Suzuki K P <suzuki@in.ibm.com>.
+
+2006-04-07  Jakub Jelinek  <jakub@redhat.com>
+
+       * pthreadP.h (FUTEX_TID_MASK): Sync with kernel.
+
+2006-04-06  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_getattr_np.c (pthread_getattr_np): Close fp if getrlimit
+       fails [Coverity CID 105].
+
+2006-04-05  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h: Add nonnull attributes.
+
+2006-04-03  Steven Munroe  <sjmunroe@us.ibm.com>
+
+       [BZ #2505]
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h [_ARCH_PWR4]:
+       Define __lll_rel_instr using lwsync.
+
+2006-03-27  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (allocate_stack): Always initialize robust_head.
+       * descr.h: Define struct robust_list_head.
+       (struct pthread): Use robust_list_head in robust mutex list definition.
+       Adjust ENQUEUE_MUTEX and DEQUEUE_MUTEX.
+       * init.c [!__ASSUME_SET_ROBUST_LIST] (__set_robust_list_avail): Define.
+       (__pthread_initialize_minimal_internal): Register robust_list with
+       the kernel.
+       * pthreadP.h: Remove PRIVATE_ from PTHREAD_MUTEX_ROBUST_* names.
+       Declare __set_robust_list_avail.
+       * pthread_create.c (start_thread): Register robust_list of new thread.
+       [!__ASSUME_SET_ROBUST_LIST]: If robust_list is not empty wake up
+       waiters.
+       * pthread_mutex_destroy.c: For robust mutexes don't look at the
+       number of users, it's unreliable.
+       * pthread_mutex_init.c: Allow use of pshared robust mutexes if
+       set_robust_list syscall is available.
+       * pthread_mutex_consistent.c: Adjust for PTHREAD_MUTEX_ROBUST_* rename.
+       * pthread_mutex_lock.c: Simplify robust mutex code a bit.
+       Set robust_head.list_op_pending before trying to lock a robust mutex.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise for unlocking.
+       * Makefile (tests): Add tst-robust8.
+       * tst-robust8.c: New file.
+
+2006-03-08  Andreas Schwab  <schwab@suse.de>
+
+       * sysdeps/unix/sysv/linux/ia64/dl-sysdep.h
+       (DL_SYSINFO_IMPLEMENTATION): Add missing newline.
+
+2006-03-05  Roland McGrath  <roland@redhat.com>
+
+       * configure (libc_add_on): Disable add-on when $add_ons_automatic = yes
+       and $config_os doesn't match *linux*.
+
+2006-03-05  David S. Miller  <davem@sunset.davemloft.net>
+
+       * sysdeps/unix/sysv/linux/sparc/sparc32/pt-vfork.S:
+       Use __syscall_error.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc64/pt-vfork.S: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc64/sysdep-cancel.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc64/vfork.S: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/Makefile: New file.
+
+2006-03-02  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/aio_misc.h: Various cleanups.
+
+2006-03-01  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
+       (__lll_robust_lock_wait): Also set FUTEX_WAITERS bit if we got the
+       mutex.
+       (__lll_robust_timedlock_wait): Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
+       (__lll_robust_lock_wait): Likewise.
+       (__lll_robust_timedlock_wait): Likewise.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+       (__lll_robust_lock_wait): Likewise.
+       (__lll_robust_timedlock_wait): Likewise.
+
+2006-03-01  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_robust_mutex_dead,
+       lll_robust_mutex_trylock, lll_robust_mutex_lock,
+       lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
+       lll_robust_mutex_unlock): Define.
+       (__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
+
+2006-02-28  H.J. Lu  <hongjiu.lu@intel.com>
+
+       * sysdeps/unix/sysv/linux/ia64/clone2.S: Include <clone2.S>
+       instead of <clone.S>.
+
+2006-02-27  Jakub Jelinek  <jakub@redhat.com>
+
+       * Makefile (libpthread-routines): Add
+       pthread_mutexattr_[sg]etprotocol, pthread_mutexattr_[sg]etprioceiling
+       and pthread_mutex_[sg]etprioceiling.
+       * Versions (GLIBC_2.4): Export pthread_mutexattr_getprotocol,
+       pthread_mutexattr_setprotocol, pthread_mutexattr_getprioceiling,
+       pthread_mutexattr_setprioceiling, pthread_mutex_getprioceiling and
+       pthread_mutex_setprioceiling.
+       * sysdeps/pthread/pthread.h (PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT,
+       PTHREAD_PRIO_PROTECT): New enum values.
+       (pthread_mutexattr_getprotocol, pthread_mutexattr_setprotocol,
+       pthread_mutexattr_getprioceiling, pthread_mutexattr_setprioceiling,
+       pthread_mutex_getprioceiling, pthread_mutex_setprioceiling): New
+       prototypes.
+       * pthreadP.h (PTHREAD_MUTEX_PRIO_INHERIT_PRIVATE_NP,
+       PTHREAD_MUTEX_PRIO_PROTECT_PRIVATE_NP): New enum values.
+       (PTHREAD_MUTEX_PRIO_CEILING_SHIFT, PTHREAD_MUTEX_PRIO_CEILING_MASK):
+       Define.
+       (PTHREAD_MUTEXATTR_PROTOCOL_SHIFT, PTHREAD_MUTEXATTR_PROTOCOL_MASK,
+       PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT,
+       PTHREAD_MUTEXATTR_PRIO_CEILING_MASK): Define.
+       (PTHREAD_MUTEXATTR_FLAG_BITS): Or in PTHREAD_MUTEXATTR_PROTOCOL_MASK
+       and PTHREAD_MUTEXATTR_PRIO_CEILING_MASK.
+       * pthread_mutex_init.c (__pthread_mutex_init): For the time being
+       return ENOTSUP for PTHREAD_PRIO_INHERIT or PTHREAD_PRIO_PROTECT
+       protocol mutexes.
+       * pthread_mutex_getprioceiling.c: New file.
+       * pthread_mutex_setprioceiling.c: New file.
+       * pthread_mutexattr_getprioceiling.c: New file.
+       * pthread_mutexattr_setprioceiling.c: New file.
+       * pthread_mutexattr_getprotocol.c: New file.
+       * pthread_mutexattr_setprotocol.c: New file.
+
+2006-02-27  Daniel Jacobowitz  <dan@codesourcery.com>
+
+       * sysdeps/unix/sysv/linux/aio_misc.h: Include <limits.h>.
+
+2006-02-27  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/pthread/Subdirs: List nptl here too.
+       * configure (libc_add_on_canonical): New variable.
+
+       * sysdeps/unix/sysv/linux/sh/sh4/lowlevellock.h: Use #include_next.
+
+       * sysdeps/unix/sysv/linux/sleep.c: Use #include_next after #include of
+       self to get main source tree's file.
+       * sysdeps/unix/sysv/linux/alpha/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/i386/vfork.S: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/clone2.S: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/s390/s390-32/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/s390/s390-64/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/sh/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/sparc64/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/clone.S: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/vfork.S: Likewise.
+
+       * Makefile: Use $(sysdirs) in vpath directive.
+
+       * sysdeps/pthread/Makefile (CFLAGS-libc-start.c): Variable removed.
+       (CPPFLAGS-timer_routines.c): Likewise.
+
+       * Makeconfig (includes): Variable removed.
+
+2006-02-26  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/generic/pt-raise.c: Moved to ...
+       * pt-raise.c: ... here.
+       * sysdeps/generic/lowlevellock.h: Moved to ...
+       * lowlevellock.h: ... here.
+
+2006-02-23  Roland McGrath  <roland@redhat.com>
+
+       * descr.h (struct pthread): Add final member `end_padding'.
+       (PTHREAD_STRUCT_END_PADDING): Use it.
+
+2006-02-20  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/mips: Directory removed, saved in ports repository.
+       * sysdeps/unix/sysv/linux/mips: Likewise.
+
+2006-02-18  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-robust1.c: Add second mutex to check that the mutex list is
+       handled correctly.
+
+2006-02-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_robust_mutex_dead,
+       lll_robust_mutex_trylock, lll_robust_mutex_lock,
+       lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
+       lll_robust_mutex_unlock): New macros.
+       (__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.c: New file.
+
+2006-02-17  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Add lll_robust_mutex_*
+       definitions.
+       * sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S: New file.
+
+2006-02-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+       (lll_robust_mutex_unlock): Avoid unnecessary wakeups.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h
+       (lll_robust_mutex_unlock): Likewise.
+
+2006-02-13  Jakub Jelinek  <jakub@redhat.com>
+
+       * descr.h [!__PTHREAD_MUTEX_HAVE_PREV] (DEQUEUE_MUTEX):
+       Set robust_list.__next rather than robust_list.
+       * sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
+       (__pthread_list_t): New typedef.
+       (pthread_mutex_t): Replace __next and __prev fields with __list.
+       * sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h
+       (__pthread_list_t): New typedef.
+       (pthread_mutex_t): Replace __next and __prev fields with __list.
+       * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
+       (__pthread_list_t, __pthread_slist_t): New typedefs.
+       (pthread_mutex_t): Replace __next and __prev fields with __list.
+       * sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h
+       (__pthread_list_t, __pthread_slist_t): New typedefs.
+       (pthread_mutex_t): Replace __next and __prev fields with __list.
+       * sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
+       (__pthread_list_t, __pthread_slist_t): New typedefs.
+       (pthread_mutex_t): Replace __next and __prev fields with __list.
+       * sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
+       (__pthread_slist_t): New typedef.
+       (pthread_mutex_t): Replace __next field with __list.
+
+2006-02-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthreadP.h: Define PTHREAD_MUTEX_INCONSISTENT instead of
+       PTHREAD_MUTEX_OWNERDEAD.
+       (PTHREAD_MUTEX_ROBUST_PRIVATE_NP): Define as 16, not 256.
+       Define FUTEX_WAITERS, FUTEX_OWNER_DIED, FUTEX_TID_MASK.
+       * Makefile (libpthread-routines): Add lowlevelrobustlock.
+       * pthread_create.c (start_thread): Very much simplify robust_list loop.
+       * pthread_mutex_consistent.c: Inconsistent mutex have __owner now set
+       to PTHREAD_MUTEX_INCONSISTENT.
+       * pthread_mutex_destroy.c: Allow destroying of inconsistent mutexes.
+       * pthread_mutex_lock.c: Reimplement robust mutex handling.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c: Likewise.
+       * sysdeps/unix/sysv/linux/Makefile (gen-as-const-headers): Add
+       lowlevelrobustlock.sym.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.sym: New file.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Add lll_robust_mutex_*
+       definitions.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: New file.
+       * sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S: New file.
+       * sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S: New file.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: New file.
+
+2006-02-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (allocate_stack): Initialize robust_list.
+       * init.c (__pthread_initialize_minimal_internal): Likewise.
+       * descr.h (struct xid_command): Pretty printing.
+       (struct pthread): Use __pthread_list_t or __pthread_slist_t for
+       robust_list.  Adjust macros.
+       * pthread_create.c (start_thread): Adjust robust_list handling.
+       * pthread_mutex_unlock.c: Don't allow unlocking from any thread
+       but the owner for all robust mutex types.
+       * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Define
+       __pthread_list_t and __pthread_slist_t.  Use them in pthread_mutex_t.
+       * sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h: Likewise.
+       * sysdeps/pthread/pthread.h: Adjust mutex initializers.
+
+       * sysdeps/unix/sysv/linux/i386/not-cancel.h: Define openat_not_cancel,
+       openat_not_cancel_3, openat64_not_cancel, and openat64_not_cancel_3.
+
+2006-02-08  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_futex_wait,
+       lll_futex_timedwait, lll_wait_tid): Add "memory" clobber.
+
+2006-01-20  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.h (lll_futex_wait):
+       Return status.
+       (lll_futex_timed_wait): Define.
+
+2006-01-19  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-cancel4.c: Test ppoll.
+
+2006-01-18  Andreas Jaeger  <aj@suse.de>
+
+       [BZ #2167]
+       * sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
+       (pthread_mutex_t): Follow changes for other archs.  Based on patch
+       by Jim Gifford <patches@jg555.com>.
+
+2006-01-13  Richard Henderson  <rth@redhat.com>
+
+       * sysdeps/alpha/tls.h (tcbhead_t): Rename member to __private.
+
+2006-01-10  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/alpha/jmpbuf-unwind.h: File moved to main source tree.
+       * sysdeps/i386/jmpbuf-unwind.h: Likewise.
+       * sysdeps/mips/jmpbuf-unwind.h: Likewise.
+       * sysdeps/powerpc/jmpbuf-unwind.h: Likewise.
+       * sysdeps/s390/jmpbuf-unwind.h: Likewise.
+       * sysdeps/sh/jmpbuf-unwind.h: Likewise.
+       * sysdeps/sparc/sparc32/jmpbuf-unwind.h: Likewise.
+       * sysdeps/sparc/sparc64/jmpbuf-unwind.h: Likewise.
+       * sysdeps/x86_64/jmpbuf-unwind.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/jmpbuf-unwind.h: Likewise.
+
+2006-01-09  Roland McGrath  <roland@redhat.com>
+
+       * tst-initializers1-c89.c: New file.
+       * tst-initializers1-c99.c: New file.
+       * tst-initializers1-gnu89.c: New file.
+       * tst-initializers1-gnu99.c: New file.
+       * Makefile (tests): Add them.
+       (CFLAGS-tst-initializers1-c89.c): New variable.
+       (CFLAGS-tst-initializers1-c99.c): New variable.
+       (CFLAGS-tst-initializers1-gnu89.c): New variable.
+       (CFLAGS-tst-initializers1-gnu99.c): New variable.
+
+       * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h (pthread_mutex_t):
+       Use __extension__ on anonymous union definition.
+       * sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h: Likewise.
+
+2006-01-08  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h (pthread_mutex_t):
+       Don't give the union a name because it changes the mangled name.
+       Instead name the struct for __data.
+       * sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h (pthread_mutex_t):
+       Likewise.
+       * sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h (pthread_mutex_t):
+       Likewise.
+
+2006-01-09  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/sparc/sparc64/jmpbuf-unwind.h (_JMPBUF_UNWINDS_ADJ): Add
+       stack bias to mc_ftp field.
+
+2006-01-07  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/aio_misc.h (AIO_MISC_WAIT): Work around gcc
+       being too clever and reloading the futex value where it shouldn't.
+
+2006-01-06  Ulrich Drepper  <drepper@redhat.com>
+
+       * descr.h [!__PTHREAD_MUTEX_HAVE_PREV] (DEQUEUE_MUTEX): Use
+       correct type.
+
+2006-01-06  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/sparc64/sysdep-cancel.h (PSEUDO):
+       Add cfi directives.
+
+2006-01-06  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/ia64/tls.h (tcbhead_t): Rename private member to __private.
+       * sysdeps/ia64/tcb-offsets.sym: Adjust for private->__private
+       rename in tcbhead_t.
+
+       * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h (pthread_mutex_t):
+       Don't give the union a name because it changes the mangled name.
+       Instead name the struct for __data.
+       * sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h: Likewise.
+       * pthread_create.c (start_thread): Adjust robust mutex free loop.
+       * descr.h (ENQUEUE_MUTEX, DEQUEUE_MUTEX): Adjust.
+
+2006-01-05  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_futex_wait):
+       Return status.
+       (lll_futex_timed_wait): Define.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+       * sysdeps/pthread/aio_misc.h: New file.
+
+2006-01-03  Joseph S. Myers  <joseph@codesourcery.com>
+
+       * Makefile ($(objpfx)$(multidir)): Use mkdir -p.
+
+2006-01-03  Steven Munroe  <sjmunroe@us.ibm.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h
+       (PSEUDO): Remove redundant cfi_startproc and cfi_endproc directives.
+       * sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h: Likewise.
+
+2006-01-04  Ulrich Drepper  <drepper@redhat.com>
+
+       * tst-cancel24.cc: Use C headers instead of C++ headers.
+
+2006-01-03  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Remove #error for
+       sparc-linux configured glibc.
+       (lll_futex_wake_unlock): Define to 1 for sparc-linux configured glibc.
+       (__lll_mutex_trylock, __lll_mutex_cond_trylock, __lll_mutex_lock,
+       __lll_mutex_cond_lock, __lll_mutex_timedlock): Use
+       atomic_compare_and_exchange_val_24_acq instead of
+       atomic_compare_and_exchange_val_acq.
+       (lll_mutex_unlock, lll_mutex_unlock_force): Use atomic_exchange_24_rel
+       instead of atomic_exchange_rel.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c: New
+       file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c: New
+       file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_init.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c:
+       New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c:
+       New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_init.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_post.c: New file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_timedwait.c: New
+       file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_trywait.c: New
+       file.
+       * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/sem_wait.c: New file.
+
+2006-01-03  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/pthread.h [__WORDSIZE==64]: Don't use cast in
+       mutex initializers.
+
+2006-01-02  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/sparc/tls.h (tcbhead_t): Add pointer_guard field.
+       (THREAD_GET_POINTER_GUARD, THREAD_SET_POINTER_GUARD,
+       THREAD_COPY_POINTER_GUARD): Define.
+       * sysdeps/sparc/tcb-offsets.sym (POINTER_GUARD): Define.
+       * sysdeps/sparc/sparc64/jmpbuf-unwind.h: Revert 2005-12-27 changes.
+
+2006-01-01  Ulrich Drepper  <drepper@redhat.com>
+
+       * version.c: Update copyright year.
+
+2005-12-29  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/sysdep-cancel.h: Remove explicit
+       .eh_frame section, use cfi_* directives.
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S: Add cfi instrumentation.
+
+2005-12-30  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/ia64/jmpbuf-unwind.h: Undo last change for
+       now.
+
+2005-12-29  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/sigaction.c: Removed.
+       * sigaction.c: New file.
+       * sysdeps/unix/sysv/linux/Makefile: Define CFLAGS-sigaction.c.
+
+2005-12-28  Ulrich Drepper  <drepper@redhat.com>
+
+       * Makefile (tests): Add tst-signal7.
+       * tst-signal7.c: New file.
+
+2005-12-27  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/x86_64/jmpbuf-unwind.h (_jmpbuf_sp): New inline function.
+       (_JMPBUF_UNWINDS_ADJ): Use it, to PTR_DEMANGLE before comparison.
+       * sysdeps/alpha/jmpbuf-unwind.h: Likewise.
+       * sysdeps/i386/jmpbuf-unwind.h: Likewise.
+       * sysdeps/mips/jmpbuf-unwind.h: Likewise.
+       * sysdeps/powerpc/jmpbuf-unwind.h: Likewise.
+       * sysdeps/s390/jmpbuf-unwind.h: Likewise.
+       * sysdeps/sh/jmpbuf-unwind.h: Likewise.
+       * sysdeps/sparc/sparc32/jmpbuf-unwind.h: Likewise.
+       * sysdeps/sparc/sparc64/jmpbuf-unwind.h: Likewise.
+       * sysdeps/unix/sysv/linux/ia64/jmpbuf-unwind.h: Likewise.
+
+2005-12-27  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h: Add __next
+       and __prev field to pthread_mutex_t.
+       * sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h: Likewise.
+       * sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h: Add __next field
+       to pthread_mutex_t.
+
+2005-12-26  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthreadP.h: Define PTHREAD_MUTEX_ROBUST_PRIVATE_NP,
+       PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP,
+       PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP,
+       PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP,
+       PTHREAD_MUTEXATTR_FLAG_ROBUST, PTHREAD_MUTEXATTR_FLAG_PSHARED,
+       and PTHREAD_MUTEXATTR_FLAG_BITS.
+       * descr.h (struct pthread): Add robust_list field and define
+       ENQUEUE_MUTEX and DEQUEUE_MUTEX macros.
+       * pthread_mutexattr_getrobust.c: New file.
+       * pthread_mutexattr_setrobust.c: New file.
+       * pthread_mutex_consistent.c: New file.
+       * sysdeps/pthread/pthread.h: Declare pthread_mutexattr_getrobust,
+       pthread_mutexattr_setrobust, and pthread_mutex_consistent.
+       Define PTHREAD_MUTEX_STALLED_NP and PTHREAD_MUTEX_ROBUST_NP.
+       Adjust pthread_mutex_t initializers.
+       * nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Add __next
+       field to pthread_mutex_t.
+       * nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h: Add __next
+       and __prev field to pthread_mutex_t.
+       * Versions [GLIBC_2.4]: Export pthread_mutexattr_getrobust_np,
+       pthread_mutexattr_setrobust_np, and pthread_mutex_consistent_np.
+       * pthread_mutexattr_getpshared.c: Use PTHREAD_MUTEXATTR_FLAG_PSHARED
+       and PTHREAD_MUTEXATTR_FLAG_BITS macros instead of magic numbers.
+       * pthread_mutexattr_gettype.c: Likewise.
+       * pthread_mutexattr_setpshared.c: Likewise.
+       * pthread_mutexattr_settype.c: Likewise.
+       * pthread_mutex_init.c: Reject robust+pshared attribute for now.
+       Initialize mutex kind according to robust flag.
+       * pthread_mutex_lock.c: Implement local robust mutex.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       * pthread_create.c (start_thread): Mark robust mutexes which remained
+       locked as dead.
+       * tst-robust1.c: New file.
+       * tst-robust2.c: New file.
+       * tst-robust3.c: New file.
+       * tst-robust4.c: New file.
+       * tst-robust5.c: New file.
+       * tst-robust6.c: New file.
+       * tst-robust7.c: New file.
+       * Makefile (libpthread-routines): Add pthread_mutexattr_getrobust,
+       pthread_mutexattr_setrobust, and pthread_mutex_consistent.
+       (tests): Add tst-robust1, tst-robust2, tst-robust3, tst-robust4,
+       tst-robust5, tst-robust6, and tst-robust7.
+
+       * tst-typesizes.c: New file.
+       * Makefile (tests): Add tst-typesizes.
+
+       * tst-once3.c: More debug output.
+
+2005-12-24  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_mutex_trylock.c (__pthread_mutex_trylock): Add break
+       missing after last change.
+
+       * version.c: Update copyright year.
+
+2005-12-23  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_mutex_destroy.c: Set mutex type to an invalid value.
+       * pthread_mutex_lock.c: Return EINVAL for invalid mutex type.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+
+2005-12-22  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/pthread/sigaction.c: Use "" instead of <> to include self,
+       so that #include_next's search location is not reset to the -I..
+       directory where <nptl/...> can be found.
+
+2005-12-22  Ulrich Drepper  <drepper@redhat.com>
+
+       [BZ #1913]
+       * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S (__new_sem_wait):
+       Fix unwind info.  Remove useless branch prediction prefix.
+       * tst-cancel24.cc: New file.
+       * Makefile: Add rules to build and run tst-cancel24.
+
+2005-12-21  Roland McGrath  <roland@redhat.com>
+
+       * libc-cancellation.c: Use <> rather than "" #includes.
+       * pt-cleanup.c: Likewise.
+       * pthread_create.c: Likewise.
+       * pthread_join.c: Likewise.
+       * pthread_timedjoin.c: Likewise.
+       * pthread_tryjoin.c: Likewise.
+       * sysdeps/unix/sysv/linux/libc_pthread_init.c: Likewise.
+       * sysdeps/unix/sysv/linux/register-atfork.c: Likewise.
+       * sysdeps/unix/sysv/linux/unregister-atfork.c: Likewise.
+       * unwind.c: Likewise.
+
+2005-12-19  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/sh/tcb-offsets.sym: Add POINTER_GUARD.
+       * sysdeps/sh/tls.h (tcbhead_t): Remove private and add pointer_guard.
+       (THREAD_GET_POINTER_GUARD, THREAD_SET_POINTER_GUARD,
+       THREAD_COPY_POINTER_GUARD): Define.
+
+2005-12-19  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/ia64/tls.h (TLS_PRE_TCB_SIZE): Make room for 2 uintptr_t's
+       rather than one.
+       (THREAD_GET_POINTER_GUARD, THREAD_SET_POINTER_GUARD,
+       THREAD_COPY_POINTER_GUARD): Define.
+       * sysdeps/powerpc/tcb-offsets.sym (POINTER_GUARD): Add.
+       * sysdeps/powerpc/tls.h (tcbhead_t): Add pointer_guard field.
+       (THREAD_GET_POINTER_GUARD, THREAD_SET_POINTER_GUARD,
+       THREAD_COPY_POINTER_GUARD): Define.
+       * sysdeps/s390/tcb-offsets.sym (STACK_GUARD): Add.
+       * sysdeps/s390/tls.h (THREAD_GET_POINTER_GUARD,
+       THREAD_SET_POINTER_GUARD, THREAD_COPY_POINTER_GUARD): Define.
+       * sysdeps/unix/sysv/linux/ia64/__ia64_longjmp.S (__ia64_longjmp):
+       Use PTR_DEMANGLE for B0 if defined.
+
+2005-12-17  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthread_create.c (__pthread_create_2_1): Use
+       THREAD_COPY_POINTER_GUARD if available.
+       * sysdeps/i386/tcb-offsets.sym: Add POINTER_GUARD.
+       * sysdeps/x86_64/tcb-offsets.sym: Likewise.
+       * sysdeps/i386/tls.h (tcbhead_t): Add pointer_guard.
+       Define THREAD_SET_POINTER_GUARD and THREAD_COPY_POINTER_GUARD.
+       * sysdeps/x86_64/tls.h: Likewise.
+
+2005-12-15  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/unix/sysv/linux/mq_notify.c: Don't use sysdeps/generic.
+
+2005-12-13  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/pthread/sigfillset.c: Adjust for files moved out of
+       sysdeps/generic.
+       * errno-loc.c: New file.
+
+2005-12-12  Roland McGrath  <roland@redhat.com>
+
+       * init.c (__pthread_initialize_minimal_internal): Do __static_tls_size
+       adjustments before choosing stack size.  Update minimum stack size
+       calculation to match allocate_stack change.
+
+2005-12-12  Ulrich Drepper  <drepper@redhat.com>
+
+       * allocatestack.c (allocate_stack): Don't demand that there is an
+       additional full page available on the stack beside guard, TLS, the
+       minimum stack.
+
+2005-11-24  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
+       (__cleanup_fct_attribute): Use __regparm__ not regparm.
+
+       * sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h: When
+       compiling 32-bit code we must define __cleanup_fct_attribute.
+
+2005-11-24  Jakub Jelinek  <jakub@redhat.com>
+
+       [BZ #1920]
+       * sysdeps/pthread/pthread.h (__pthread_unwind_next): Use
+       __attribute__ instead of __attribute.
+       * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
+       (__cleanup_fct_attribute): Likewise.
+
+2005-11-17  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/pthread/unwind-forcedunwind.c (pthread_cancel_init): Put
+       a write barrier before writing libgcc_s_getcfa.
+
+2005-11-06  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/configure: Removed.
+
+2005-11-05  Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/ia64/pt-initfini.c: Remove trace of
+       optional init_array/fini_array support.
+
+2005-10-24  Roland McGrath  <roland@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Remove unnecessary
+       versioned_symbol use.
+
+2005-10-16  Roland McGrath  <roland@redhat.com>
+
+       * init.c (__pthread_initialize_minimal_internal): Even when using a
+       compile-time default stack size, apply the minimum that allocate_stack
+       will require, and round up to page size.
+
+2005-10-10  Daniel Jacobowitz  <dan@codesourcery.com>
+
+       * Makefile ($(test-modules)): Remove static pattern rule.
+
+2005-10-14  Jakub Jelinek  <jakub@redhat.com>
+           Ulrich Drepper  <drepper@redhat.com>
+
+       * sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Fix stack
+       alignment in callback function.
+       * Makefile: Add rules to build and run tst-align3.
+       * tst-align3.c: New file.
+
+2005-10-03  Jakub Jelinek  <jakub@redhat.com>
+
+       * allocatestack.c (setxid_signal_thread): Add
+       INTERNAL_SYSCALL_DECL (err).
+
+2005-10-02  Jakub Jelinek  <jakub@redhat.com>
+
+       * allocatestack.c (setxid_signal_thread): Need to use
+       atomic_compare_and_exchange_bool_acq.
+
+2005-10-01  Ulrich Drepper  <drepper@redhat.com>
+           Jakub Jelinek  <jakub@redhat.com>
+
+       * descr.h: Define SETXID_BIT and SETXID_BITMASK.  Adjust
+       CANCEL_RESTMASK.
+       (struct pthread): Move specific_used field to avoid padding.
+       Add setxid_futex field.
+       * init.c (sighandler_setxid): Reset setxid flag and release the
+       setxid futex.
+       * allocatestack.c (setxid_signal_thread): New function.  Broken
+       out of the bodies of the two loops in __nptl_setxid.  For undetached
+       threads check whether they are exiting and if yes, don't send a signal.
+       (__nptl_setxid): Simplify loops by using setxid_signal_thread.
+       * pthread_create.c (start_thread): For undetached threads, check
+       whether setxid bit is set.  If yes, wait until signal has been
+       processed.
+
+       * allocatestack.c (STACK_VARIABLES): Initialize them.
+       * pthread_create.c (__pthread_create_2_1): Initialize pd.
+
+2004-09-02  Jakub Jelinek  <jakub@redhat.com>
+
+       * pthread_cond_destroy.c (__pthread_cond_destroy): If there are
+       waiters, awake all waiters on the associated mutex.
+
+2005-09-22  Roland McGrath  <roland@redhat.com>
+
+       * perf.c [__x86_64__] (HP_TIMING_NOW): New macro (copied from
+       ../sysdeps/x86_64/hp-timing.h).
+
+2005-08-29  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h (FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (lll_futex_wake_unlock): Define.
+       * sysdeps/unix/sysv/linux/alpha/lowlevellock.h (FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (lll_futex_wake_unlock): Define.
+       * sysdeps/unix/sysv/linux/ia64/lowlevellock.h (FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (lll_futex_wake_unlock): Define.
+       * sysdeps/unix/sysv/linux/s390/lowlevellock.h (FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (lll_futex_wake_unlock): Define.
+       * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (FUTEX_WAKE_OP,
+       FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (lll_futex_wake_unlock): Define.
+       * sysdeps/pthread/pthread_cond_signal.c (__pthread_cond_signal): Use
+       lll_futex_wake_unlock.
+       * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
+       (FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (__pthread_cond_signal): Use FUTEX_WAKE_OP.
+       * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+       (FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+       (__pthread_cond_signal): Use FUTEX_WAKE_OP.
+
+2005-09-05  Kaz Kojima  <kkojima@rr.iij4u.or.jp>
+
+       * sysdeps/unix/sysv/linux/sh/lowlevellock.S (__lll_mutex_lock_wait):
+       Fix typo in register name.
+
 2005-08-23  Ulrich Drepper  <drepper@redhat.com>
 
        * sysdeps/unix/sysv/linux/timer_routines.c (timer_helper_thread):
diff --git a/libpthread/nptl/DESIGN-barrier.txt b/libpthread/nptl/DESIGN-barrier.txt
new file mode 100644 (file)
index 0000000..23463c6
--- /dev/null
@@ -0,0 +1,44 @@
+Barriers pseudocode
+===================
+
+    int pthread_barrier_wait(barrier_t *barrier);
+
+struct barrier_t {
+
+   unsigned int lock;
+         - internal mutex
+
+   unsigned int left;
+         - current barrier count, # of threads still needed.
+
+   unsigned int init_count;
+         - number of threads needed for the barrier to continue.
+
+   unsigned int curr_event;
+         - generation count
+}
+
+pthread_barrier_wait(barrier_t *barrier)
+{
+  unsigned int event;
+  result = 0;
+
+  lll_lock(barrier->lock);
+  if (!--barrier->left) {
+    barrier->curr_event++;
+    futex_wake(&barrier->curr_event, INT_MAX)
+
+    result = BARRIER_SERIAL_THREAD;
+  } else {
+    event = barrier->curr_event;
+    lll_unlock(barrier->lock);
+    do {
+      futex_wait(&barrier->curr_event, event)
+    } while (event == barrier->curr_event);
+  }
+
+  if (atomic_increment_val (barrier->left) == barrier->init_count)
+    lll_unlock(barrier->lock);
+
+  return result;
+}
diff --git a/libpthread/nptl/DESIGN-condvar.txt b/libpthread/nptl/DESIGN-condvar.txt
new file mode 100644 (file)
index 0000000..4845251
--- /dev/null
@@ -0,0 +1,134 @@
+Conditional Variable pseudocode.
+================================
+
+       int pthread_cond_timedwait (pthread_cond_t *cv, pthread_mutex_t *mutex,
+                                   const struct timespec *abstime);
+       int pthread_cond_signal    (pthread_cond_t *cv);
+       int pthread_cond_broadcast (pthread_cond_t *cv);
+
+struct pthread_cond_t {
+
+   unsigned int cond_lock;
+
+         internal mutex
+
+   uint64_t total_seq;
+
+     Total number of threads using the conditional variable.
+
+   uint64_t wakeup_seq;
+
+     sequence number for next wakeup.
+
+   uint64_t woken_seq;
+
+     sequence number of last woken thread.
+
+   uint32_t broadcast_seq;
+
+}
+
+
+struct cv_data {
+
+   pthread_cond_t *cv;
+
+   uint32_t bc_seq;
+
+}
+
+
+
+cleanup_handler(cv_data)
+{
+  cv = cv_data->cv;
+  lll_lock(cv->lock);
+
+  if (cv_data->bc_seq == cv->broadcast_seq) {
+    ++cv->wakeup_seq;
+    ++cv->woken_seq;
+  }
+
+  /* make sure no signal gets lost.  */
+  FUTEX_WAKE(cv->wakeup_seq, ALL);
+
+  lll_unlock(cv->lock);
+}
+
+
+cond_timedwait(cv, mutex, timeout):
+{
+   lll_lock(cv->lock);
+   mutex_unlock(mutex);
+
+   cleanup_push
+
+   ++cv->total_seq;
+   val = seq =  cv->wakeup_seq;
+   cv_data.bc_seq = cv->broadcast_seq;
+   cv_data.cv = cv;
+
+   while (1) {
+
+     lll_unlock(cv->lock);
+
+     enable_async(&cv_data);
+
+     ret = FUTEX_WAIT(cv->wakeup_seq, val, timeout);
+
+     restore_async
+
+     lll_lock(cv->lock);
+
+     if (cv_data.bc_seq != cv->broadcast_seq)
+       goto bc_out;
+
+     val = cv->wakeup_seq;
+
+     if (val != seq && cv->woken_seq != val) {
+       ret = 0;
+       break;
+     }
+
+     if (ret == TIMEDOUT) {
+       ++cv->wakeup_seq;
+       break;
+     }
+   }
+
+   ++cv->woken_seq;
+
+ bc_out:
+   lll_unlock(cv->lock);
+
+   cleanup_pop
+
+   mutex_lock(mutex);
+
+   return ret;
+}
+
+cond_signal(cv)
+{
+   lll_lock(cv->lock);
+
+   if (cv->total_seq > cv->wakeup_seq) {
+     ++cv->wakeup_seq;
+     FUTEX_WAKE(cv->wakeup_seq, 1);
+   }
+
+   lll_unlock(cv->lock);
+}
+
+cond_broadcast(cv)
+{
+   lll_lock(cv->lock);
+
+   if (cv->total_seq > cv->wakeup_seq) {
+     cv->wakeup_seq = cv->total_seq;
+     cv->woken_seq = cv->total_seq;
+     ++cv->broadcast_seq;
+     FUTEX_WAKE(cv->wakeup_seq, ALL);
+   }
+
+   lll_unlock(cv->lock);
+}
diff --git a/libpthread/nptl/DESIGN-rwlock.txt b/libpthread/nptl/DESIGN-rwlock.txt
new file mode 100644 (file)
index 0000000..810d1b8
--- /dev/null
@@ -0,0 +1,113 @@
+Reader Writer Locks pseudocode
+==============================
+
+       pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+       pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
+       pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
+
+struct pthread_rwlock_t {
+
+   unsigned int lock;
+         - internal mutex
+
+   unsigned int writers_preferred;
+         - locking mode: 0 recursive, readers preferred
+                         1 nonrecursive, writers preferred
+
+   unsigned int readers;
+         - number of read-only references various threads have
+
+   pthread_t writer;
+         - descriptor of the writer or 0
+
+   unsigned int readers_wakeup;
+         - 'all readers should wake up' futex.
+
+   unsigned int writer_wakeup;
+         - 'one writer should wake up' futex.
+
+   unsigned int nr_readers_queued;
+         - number of readers queued up.
+
+   unsigned int nr_writers_queued;
+         - number of writers queued up.
+}
+
+pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
+{
+  lll_lock(rwlock->lock);
+  for (;;) {
+    if (!rwlock->writer && (!rwlock->nr_writers_queued ||
+                                       !rwlock->writers_preferred))
+        break;
+
+    rwlock->nr_readers_queued++;
+    val = rwlock->readers_wakeup;
+    lll_unlock(rwlock->lock);
+
+    futex_wait(&rwlock->readers_wakeup, val)
+
+    lll_lock(rwlock->lock);
+    rwlock->nr_readers_queued--;
+  }
+  rwlock->readers++;
+  lll_unlock(rwlock->lock);
+}
+
+pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
+{
+  int result = EBUSY;
+  lll_lock(rwlock->lock);
+  if (!rwlock->writer && (!rwlock->nr_writers_queued ||
+                                       !rwlock->writers_preferred))
+    rwlock->readers++, result = 0;
+  lll_unlock(rwlock->lock);
+  return result;
+}
+
+pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
+{
+  lll_lock(rwlock->lock);
+  for (;;) {
+    if (!rwlock->writer && !rwlock->readers)
+       break;
+
+    rwlock->nr_writers_queued++;
+    val = rwlock->writer_wakeup;
+    lll_unlock(rwlock->lock);
+
+    futex_wait(&rwlock->writer_wakeup, val);
+
+    lll_lock(rwlock->lock);
+    rwlock->nr_writers_queued--;
+  }
+  rwlock->writer = pthread_self();
+  lll_unlock(rwlock->lock);
+}
+
+pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
+{
+  lll_lock(rwlock->lock);
+
+  if (rwlock->writer)
+    rwlock->writer = 0;
+  else
+    rwlock->readers--;
+
+  if (!rwlock->readers) {
+    if (rwlock->nr_writers_queued) {
+      ++rwlock->writer_wakeup;
+      lll_unlock(rwlock->lock);
+      futex_wake(&rwlock->writer_wakeup, 1);
+      return;
+    } else
+      if (rwlock->nr_readers_queued) {
+        ++rwlock->readers_wakeup;
+        lll_unlock(rwlock->lock);
+        futex_wake(&rwlock->readers_wakeup, MAX_INT);
+        return;
+      }
+  }
+
+  lll_unlock(rwlock->lock);
+}
diff --git a/libpthread/nptl/DESIGN-sem.txt b/libpthread/nptl/DESIGN-sem.txt
new file mode 100644 (file)
index 0000000..17eb0c1
--- /dev/null
@@ -0,0 +1,46 @@
+Semaphores pseudocode
+==============================
+
+       int sem_wait(sem_t * sem);
+       int sem_trywait(sem_t * sem);
+       int sem_post(sem_t * sem);
+       int sem_getvalue(sem_t * sem, int * sval);
+
+struct sem_t {
+
+   unsigned int count;
+         - current semaphore count, also used as a futex
+}
+
+sem_wait(sem_t *sem)
+{
+  for (;;) {
+
+    if (atomic_decrement_if_positive(sem->count))
+      break;
+
+    futex_wait(&sem->count, 0)
+  }
+}
+
+sem_post(sem_t *sem)
+{
+  n = atomic_increment(sem->count);
+  // Pass the new value of sem->count
+  futex_wake(&sem->count, n + 1);
+}
+
+sem_trywait(sem_t *sem)
+{
+  if (atomic_decrement_if_positive(sem->count)) {
+    return 0;
+  } else {
+    return EAGAIN;
+  }
+}
+
+sem_getvalue(sem_t *sem, int *sval)
+{
+  *sval = sem->count;
+  read_barrier();
+}
index 9b4ba51..d3386a3 100644 (file)
@@ -31,9 +31,16 @@ libpthread-routines = init vars events version \
                      pthread_mutex_init pthread_mutex_destroy \
                      pthread_mutex_lock pthread_mutex_trylock \
                      pthread_mutex_timedlock pthread_mutex_unlock \
+                     pthread_mutex_consistent \
                      pthread_mutexattr_init pthread_mutexattr_destroy \
                      pthread_mutexattr_getpshared \
                      pthread_mutexattr_setpshared \
+                     pthread_mutexattr_getrobust \
+                     pthread_mutexattr_setrobust \
+                     pthread_mutexattr_getprotocol \
+                     pthread_mutexattr_setprotocol \
+                     pthread_mutexattr_getprioceiling \
+                     pthread_mutexattr_setprioceiling \
                      pthread_mutexattr_gettype pthread_mutexattr_settype \
                      pthread_rwlock_init pthread_rwlock_destroy \
                      pthread_rwlock_rdlock pthread_rwlock_timedrdlock \
diff --git a/libpthread/nptl/TODO b/libpthread/nptl/TODO
new file mode 100644 (file)
index 0000000..70b8fe4
--- /dev/null
@@ -0,0 +1,31 @@
+- we should probably extend pthread_mutexattr_t with a field to create a
+  single linked list of all instances.  This requires changing the
+  pthread_mutexattr_* functions.
+
+
+- a new attribute for mutexes: number of times we spin before calling
+sys_futex
+
+- for adaptive mutexes: when releasing, determine whether somebody spins.
+If yes, for a short time release lock.  If someone else locks no wakeup
+syscall needed.
+
+
+
+- test with threaded process terminating and semadj (?) being applied
+  only after all threads are gone
+
+
+
+- semaphore changes:
+
+  - sem_post should only wake one thread and only when the state of
+    the semaphore changed from 0 to 1
+
+    this also requires that sem_wait and sem_timedwait don't drop the
+    post if they get canceled.
+
+  - possibly add counter field.  This requires reviving the
+    differences between old and new semaphore functions.  The old ones
+    stay as they are now.  The new ones can use an additional field
+    which is the counter for the number of waiters
diff --git a/libpthread/nptl/TODO-kernel b/libpthread/nptl/TODO-kernel
new file mode 100644 (file)
index 0000000..ad6d2a4
--- /dev/null
@@ -0,0 +1,20 @@
+- setuid/setgid must effect process
+  + test syscalls (getuid) afterwards
+  + test core file content
+
+  + use UID/GID in access(2), chmod(2), chown(2), link(2)
+
+- nice level is process property
+
+- rlimit should be process-wide and SIGXCPU should be sent if all threads
+  together exceed the limit
+
+- getrusage() must return resource utilization for the process
+
+
+
+The following are possible optimizations and in no way required:
+
+
+- the scheduler should be thread group-aware, i.e., it has to give time to
+  the thread group not proportional to the number of threads.
diff --git a/libpthread/nptl/TODO-testing b/libpthread/nptl/TODO-testing
new file mode 100644 (file)
index 0000000..e076e56
--- /dev/null
@@ -0,0 +1,20 @@
+pthread_attr_setguardsize
+
+  test effectiveness
+
+pthread_attr_[sg]etschedparam
+
+  what to test?
+
+pthread_attr_[sg]etstack
+
+  some more tests needed
+
+pthread_getcpuclockid
+
+  check that value is reset -> rt subdir
+
+pthread_getschedparam
+pthread_setschedparam
+
+  what to test?
index dab9f08..7d4f9fd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <tls.h>
 #include <lowlevellock.h>
 #include <link.h>
+#include <bits/kernel-features.h>
 
-#define __getpagesize getpagesize
 
 #ifndef NEED_SEPARATE_REGISTER_STACK
 
 /* Most architectures have exactly one stack pointer.  Some have more.  */
-# define STACK_VARIABLES void *stackaddr = 0
+# define STACK_VARIABLES void *stackaddr = NULL
 
 /* How to pass the values to the 'create_thread' function.  */
 # define STACK_VARIABLES_ARGS stackaddr
@@ -53,7 +53,7 @@
 
 /* We need two stacks.  The kernel will place them but we have to tell
    the kernel about the size of the reserved address space.  */
-# define STACK_VARIABLES void *stackaddr = 0; size_t stacksize
+# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0
 
 /* How to pass the values to the 'create_thread' function.  */
 # define STACK_VARIABLES_ARGS stackaddr, stacksize
 #endif
 
 
-/* Let the architecture add some flags to the mmap() call used to
-   allocate stacks.  */
-#ifndef ARCH_MAP_FLAGS
-# define ARCH_MAP_FLAGS 0
+/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
+   a stack.  Use it when possible.  */
+#ifndef MAP_STACK
+# define MAP_STACK 0
 #endif
 
 /* This yields the pointer that TLS support code calls the thread pointer.  */
@@ -104,7 +104,7 @@ static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default.  */
 static size_t stack_cache_actsize;
 
 /* Mutex protecting this variable.  */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;
 
 /* List of queued stack frames.  */
 static LIST_HEAD (stack_cache);
@@ -112,10 +112,15 @@ static LIST_HEAD (stack_cache);
 /* List of the stacks in use.  */
 static LIST_HEAD (stack_used);
 
+/* We need to record what list operations we are going to do so that,
+   in case of an asynchronous interruption due to a fork() call, we
+   can correct for the work.  */
+static uintptr_t in_flight_stack;
+
 /* List of the threads with user provided stacks in use.  No need to
    initialize this, since it's done in __pthread_initialize_minimal.  */
 list_t __stack_user __attribute__ ((nocommon));
-hidden_def (__stack_user)
+hidden_data_def (__stack_user)
 
 #if COLORING_INCREMENT != 0
 /* Number of threads created.  */
@@ -127,6 +132,36 @@ static unsigned int nptl_ncreated;
 #define FREE_P(descr) ((descr)->tid <= 0)
 
 
+static void
+stack_list_del (list_t *elem)
+{
+  in_flight_stack = (uintptr_t) elem;
+
+  atomic_write_barrier ();
+
+  list_del (elem);
+
+  atomic_write_barrier ();
+
+  in_flight_stack = 0;
+}
+
+
+static void
+stack_list_add (list_t *elem, list_t *list)
+{
+  in_flight_stack = (uintptr_t) elem | 1;
+
+  atomic_write_barrier ();
+
+  list_add (elem, list);
+
+  atomic_write_barrier ();
+
+  in_flight_stack = 0;
+}
+
+
 /* We create a double linked list of all cache entries.  Double linked
    because this allows removing entries from the end.  */
 
@@ -140,7 +175,7 @@ get_cached_stack (size_t *sizep, void **memp)
   struct pthread *result = NULL;
   list_t *entry;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Search the cache for a matching entry.  We search for the
      smallest stack which has at least the required size.  Note that
@@ -173,22 +208,22 @@ get_cached_stack (size_t *sizep, void **memp)
       || __builtin_expect (result->stackblock_size > 4 * size, 0))
     {
       /* Release the lock.  */
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
       return NULL;
     }
 
   /* Dequeue the entry.  */
-  list_del (&result->list);
+  stack_list_del (&result->list);
 
   /* And add to the list of stacks in use.  */
-  list_add (&result->list, &stack_used);
+  stack_list_add (&result->list, &stack_used);
 
   /* And decrease the cache size.  */
   stack_cache_actsize -= result->stackblock_size;
 
   /* Release the lock early.  */
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   /* Report size and location of the stack to the caller.  */
   *sizep = result->stackblock_size;
@@ -212,6 +247,45 @@ get_cached_stack (size_t *sizep, void **memp)
 }
 
 
+/* Free stacks until cache size is lower than LIMIT.  */
+void
+__free_stacks (size_t limit)
+{
+  /* We reduce the size of the cache.  Remove the last entries until
+     the size is below the limit.  */
+  list_t *entry;
+  list_t *prev;
+
+  /* Search from the end of the list.  */
+  list_for_each_prev_safe (entry, prev, &stack_cache)
+    {
+      struct pthread *curr;
+
+      curr = list_entry (entry, struct pthread, list);
+      if (FREE_P (curr))
+       {
+         /* Unlink the block.  */
+         stack_list_del (entry);
+
+         /* Account for the freed memory.  */
+         stack_cache_actsize -= curr->stackblock_size;
+
+         /* Free the memory associated with the ELF TLS.  */
+         _dl_deallocate_tls (TLS_TPADJ (curr), false);
+
+         /* Remove this block.  This should never fail.  If it does
+            something is really wrong.  */
+         if (munmap (curr->stackblock, curr->stackblock_size) != 0)
+           abort ();
+
+         /* Maybe we have freed enough.  */
+         if (stack_cache_actsize <= limit)
+           break;
+       }
+    }
+}
+
+
 /* Add a stack frame which is not used anymore to the stack.  Must be
    called with the cache lock held.  */
 static inline void
@@ -221,44 +295,11 @@ queue_stack (struct pthread *stack)
   /* We unconditionally add the stack to the list.  The memory may
      still be in use but it will not be reused until the kernel marks
      the stack as not used anymore.  */
-  list_add (&stack->list, &stack_cache);
+  stack_list_add (&stack->list, &stack_cache);
 
   stack_cache_actsize += stack->stackblock_size;
   if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
-    {
-      /* We reduce the size of the cache.  Remove the last entries
-        until the size is below the limit.  */
-      list_t *entry;
-      list_t *prev;
-
-      /* Search from the end of the list.  */
-      list_for_each_prev_safe (entry, prev, &stack_cache)
-       {
-         struct pthread *curr;
-
-         curr = list_entry (entry, struct pthread, list);
-         if (FREE_P (curr))
-           {
-             /* Unlink the block.  */
-             list_del (entry);
-
-             /* Account for the freed memory.  */
-             stack_cache_actsize -= curr->stackblock_size;
-
-             /* Free the memory associated with the ELF TLS.  */
-             _dl_deallocate_tls (TLS_TPADJ (curr), false);
-
-             /* Remove this block.  This should never fail.  If it
-                does something is really wrong.  */
-             if (munmap (curr->stackblock, curr->stackblock_size) != 0)
-               abort ();
-
-             /* Maybe we have freed enough.  */
-             if (stack_cache_actsize <= stack_cache_maxsize)
-               break;
-           }
-       }
-    }
+    __free_stacks (stack_cache_maxsize);
 }
 
 
@@ -275,9 +316,14 @@ change_stack_perm (struct pthread *pd
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
   size_t len = pd->stackblock + pd->stackblock_size - stack;
-#else
+#elif _STACK_GROWS_DOWN
   void *stack = pd->stackblock + pd->guardsize;
   size_t len = pd->stackblock_size - pd->guardsize;
+#elif _STACK_GROWS_UP
+  void *stack = pd->stackblock;
+  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
+#else
+# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
 #endif
   if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
     return errno;
@@ -358,6 +404,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
 #endif
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+      /* The thread must know when private futexes are supported.  */
+      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
+                                               header.private_futex);
+#endif
+
 #ifdef NEED_DL_SYSINFO
       /* Copy the sysinfo value from the parent.  */
       THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
@@ -376,12 +428,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 
       /* Prepare to modify global data.  */
-      lll_lock (stack_cache_lock);
+      lll_lock (stack_cache_lock, LLL_PRIVATE);
 
       /* And add to the list of stacks in use.  */
       list_add (&pd->list, &__stack_user);
 
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
     }
   else
     {
@@ -406,8 +458,9 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
       guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
-      if (__builtin_expect (size < (guardsize + __static_tls_size
-                                   + MINIMAL_REST_STACK + pagesize_m1 + 1),
+      if (__builtin_expect (size < ((guardsize + __static_tls_size
+                                    + MINIMAL_REST_STACK + pagesize_m1)
+                                   & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
@@ -427,15 +480,14 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 #endif
 
          mem = mmap (NULL, size, prot,
-                     MAP_PRIVATE | MAP_ANONYMOUS | ARCH_MAP_FLAGS, -1, 0);
+                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 
          if (__builtin_expect (mem == MAP_FAILED, 0))
            {
-#ifdef ARCH_RETRY_MMAP
-             mem = ARCH_RETRY_MMAP (size);
-             if (__builtin_expect (mem == MAP_FAILED, 0))
-#endif
-               return errno;
+             if (errno == ENOMEM)
+               __set_errno (EAGAIN);
+
+              return errno;
            }
 
          /* SIZE is guaranteed to be greater than zero.
@@ -490,6 +542,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
 #endif
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+         /* The thread must know when private futexes are supported.  */
+         pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
+                                                    header.private_futex);
+#endif
+
 #ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
@@ -512,12 +570,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 
          /* Prepare to modify global data.  */
-         lll_lock (stack_cache_lock);
+         lll_lock (stack_cache_lock, LLL_PRIVATE);
 
          /* And add to the list of stacks in use.  */
-         list_add (&pd->list, &stack_used);
+         stack_list_add (&pd->list, &stack_used);
 
-         lll_unlock (stack_cache_lock);
+         lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
 
          /* Note that all of the stack and the thread descriptor is
@@ -533,8 +591,10 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
        {
 #ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
-#else
+#elif _STACK_GROWS_DOWN
          char *guard = mem;
+# elif _STACK_GROWS_UP
+         char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
 #endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
@@ -542,12 +602,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
            mprot_error:
              err = errno;
 
-             lll_lock (stack_cache_lock);
+             lll_lock (stack_cache_lock, LLL_PRIVATE);
 
              /* Remove the thread from the list.  */
-             list_del (&pd->list);
+             stack_list_del (&pd->list);
 
-             lll_unlock (stack_cache_lock);
+             lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -581,10 +641,14 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
-#else
+#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
+#elif _STACK_GROWS_UP
+         if (mprotect ((char *) pd - pd->guardsize,
+                       pd->guardsize - guardsize, prot) != 0)
+           goto mprot_error;
 #endif
 
          pd->guardsize = guardsize;
@@ -599,6 +663,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
      stillborn thread could be canceled while the lock is taken.  */
   pd->lock = LLL_LOCK_INITIALIZER;
 
+  /* The robust mutex lists also need to be initialized
+     unconditionally because the cleanup for the previous stack owner
+     might have happened in the kernel.  */
+  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+                                 - offsetof (pthread_mutex_t,
+                                             __data.__list.__next));
+  pd->robust_head.list_op_pending = NULL;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_prev = &pd->robust_head;
+#endif
+  pd->robust_head.list = &pd->robust_head;
+
   /* We place the thread descriptor at the end of the stack.  */
   *pdp = pd;
 
@@ -612,8 +688,11 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 #ifdef NEED_SEPARATE_REGISTER_STACK
   *stack = pd->stackblock;
   *stacksize = stacktop - *stack;
-#else
+#elif _STACK_GROWS_DOWN
   *stack = stacktop;
+#elif _STACK_GROWS_UP
+  *stack = pd->stackblock;
+  assert (*stack > 0);
 #endif
 
   return 0;
@@ -624,11 +703,11 @@ void
 internal_function
 __deallocate_stack (struct pthread *pd)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Remove the thread from the list of threads with user defined
      stacks.  */
-  list_del (&pd->list);
+  stack_list_del (&pd->list);
 
   /* Not much to do.  Just free the mmap()ed memory.  Note that we do
      not reset the 'used' flag in the 'tid' field.  This is done by
@@ -640,7 +719,7 @@ __deallocate_stack (struct pthread *pd)
     /* Free the memory associated with the ELF TLS.  */
     _dl_deallocate_tls (TLS_TPADJ (pd), false);
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
 
 
@@ -657,7 +736,7 @@ __make_stacks_executable (void **stack_endp)
   const size_t pagemask = ~(__getpagesize () - 1);
 #endif
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   list_t *runp;
   list_for_each (runp, &stack_used)
@@ -686,7 +765,7 @@ __make_stacks_executable (void **stack_endp)
          break;
       }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return err;
 }
@@ -701,15 +780,51 @@ __reclaim_stacks (void)
 {
   struct pthread *self = (struct pthread *) THREAD_SELF;
 
-  /* No locking necessary.  The caller is the only stack in use.  */
+  /* No locking necessary.  The caller is the only stack in use.  But
+     we have to be aware that we might have interrupted a list
+     operation.  */
+
+  if (in_flight_stack != 0)
+    {
+      bool add_p = in_flight_stack & 1;
+      list_t *elem = (list_t *) (in_flight_stack & ~UINTMAX_C (1));
+
+      if (add_p)
+       {
+         /* We always add at the beginning of the list.  So in this
+            case we only need to check the beginning of these lists.  */
+         int check_list (list_t *l)
+         {
+           if (l->next->prev != l)
+             {
+               assert (l->next->prev == elem);
+
+               elem->next = l->next;
+               elem->prev = l;
+               l->next = elem;
+
+               return 1;
+             }
+
+           return 0;
+         }
+
+         if (check_list (&stack_used) == 0)
+           (void) check_list (&stack_cache);
+       }
+      else
+       {
+         /* We can simply always replay the delete operation.  */
+         elem->next->prev = elem->prev;
+         elem->prev->next = elem->next;
+       }
+    }
 
   /* Mark all stacks except the still running one as free.  */
   list_t *runp;
   list_for_each (runp, &stack_used)
     {
-      struct pthread *curp;
-
-      curp = list_entry (runp, struct pthread, list);
+      struct pthread *curp = list_entry (runp, struct pthread, list);
       if (curp != self)
        {
          /* This marks the stack as free.  */
@@ -720,16 +835,43 @@ __reclaim_stacks (void)
 
          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;
+
+         if (curp->specific_used)
+           {
+             /* Clear the thread-specific data.  */
+             memset (curp->specific_1stblock, '\0',
+                     sizeof (curp->specific_1stblock));
+
+             curp->specific_used = false;
+
+             for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
+               if (curp->specific[cnt] != NULL)
+                 {
+                   memset (curp->specific[cnt], '\0',
+                           sizeof (curp->specific_1stblock));
+
+                   /* We have allocated the block which we do not
+                      free here so re-set the bit.  */
+                   curp->specific_used = true;
+                 }
+           }
        }
     }
 
+  /* Reset the PIDs in any cached stacks.  */
+  list_for_each (runp, &stack_cache)
+    {
+      struct pthread *curp = list_entry (runp, struct pthread, list);
+      curp->pid = self->pid;
+    }
+
   /* Add the stack of all running threads to the cache.  */
   list_splice (&stack_used, &stack_cache);
 
   /* Remove the entry for the current thread to from the cache list
      and add it to the list of running threads.  Which of the two
      lists is decided by the user_stack flag.  */
-  list_del (&self->list);
+  stack_list_del (&self->list);
 
   /* Re-initialize the lists for all the threads.  */
   INIT_LIST_HEAD (&stack_used);
@@ -743,6 +885,8 @@ __reclaim_stacks (void)
   /* There is one thread running.  */
   __nptl_nthreads = 1;
 
+  in_flight_stack = 0;
+
   /* Initialize the lock.  */
   stack_cache_lock = LLL_LOCK_INITIALIZER;
 }
@@ -757,7 +901,7 @@ __find_thread_by_id (pid_t tid)
 {
   struct pthread *result = NULL;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -789,24 +933,100 @@ __find_thread_by_id (pid_t tid)
     }
 
  out:
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
 #endif
 
+
+static void
+internal_function
+setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
+{
+  int ch;
+
+  /* Don't let the thread exit before the setxid handler runs.  */
+  t->setxid_futex = 0;
+
+  do
+    {
+      ch = t->cancelhandling;
+
+      /* If the thread is exiting right now, ignore it.  */
+      if ((ch & EXITING_BITMASK) != 0)
+       return;
+    }
+  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
+                                              ch | SETXID_BITMASK, ch));
+}
+
+
+static void
+internal_function
+setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
+{
+  int ch;
+
+  do
+    {
+      ch = t->cancelhandling;
+      if ((ch & SETXID_BITMASK) == 0)
+       return;
+    }
+  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
+                                              ch & ~SETXID_BITMASK, ch));
+
+  /* Release the futex just in case.  */
+  t->setxid_futex = 1;
+  lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
+}
+
+
+static int
+internal_function
+setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
+{
+  if ((t->cancelhandling & SETXID_BITMASK) == 0)
+    return 0;
+
+  int val;
+  INTERNAL_SYSCALL_DECL (err);
+#if __ASSUME_TGKILL
+  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
+                         t->tid, SIGSETXID);
+#else
+# ifdef __NR_tgkill
+  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
+                         t->tid, SIGSETXID);
+  if (INTERNAL_SYSCALL_ERROR_P (val, err)
+      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
+# endif
+    val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
+#endif
+
+  /* If this failed, it must have had not started yet or else exited.  */
+  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
+    {
+      atomic_increment (&cmdp->cntr);
+      return 1;
+    }
+  else
+    return 0;
+}
+
+
 int
 attribute_hidden
 __nptl_setxid (struct xid_command *cmdp)
 {
+  int signalled;
   int result;
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   __xidcmd = cmdp;
   cmdp->cntr = 0;
 
-  INTERNAL_SYSCALL_DECL (err);
-
   struct pthread *self = THREAD_SELF;
 
   /* Iterate over the list with system-allocated threads first.  */
@@ -814,65 +1034,79 @@ __nptl_setxid (struct xid_command *cmdp)
   list_for_each (runp, &stack_used)
     {
       struct pthread *t = list_entry (runp, struct pthread, list);
-      if (t != self)
-       {
-         int val;
-#if __ASSUME_TGKILL
-         val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                 THREAD_GETMEM (THREAD_SELF, pid),
-                                 t->tid, SIGSETXID);
-#else
-# ifdef __NR_tgkill
-         val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                 THREAD_GETMEM (THREAD_SELF, pid),
-                                 t->tid, SIGSETXID);
-         if (INTERNAL_SYSCALL_ERROR_P (val, err)
-             && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
-# endif
-           val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
-#endif
+      if (t == self)
+       continue;
 
-         if (!INTERNAL_SYSCALL_ERROR_P (val, err))
-           atomic_increment (&cmdp->cntr);
-       }
+      setxid_mark_thread (cmdp, t);
     }
 
   /* Now the list with threads using user-allocated stacks.  */
   list_for_each (runp, &__stack_user)
     {
       struct pthread *t = list_entry (runp, struct pthread, list);
-      if (t != self)
+      if (t == self)
+       continue;
+
+      setxid_mark_thread (cmdp, t);
+    }
+
+  /* Iterate until we don't succeed in signalling anyone.  That means
+     we have gotten all running threads, and their children will be
+     automatically correct once started.  */
+  do
+    {
+      signalled = 0;
+
+      list_for_each (runp, &stack_used)
        {
-         int val;
-#if __ASSUME_TGKILL
-         val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                 THREAD_GETMEM (THREAD_SELF, pid),
-                                 t->tid, SIGSETXID);
-#else
-# ifdef __NR_tgkill
-         val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                 THREAD_GETMEM (THREAD_SELF, pid),
-                                 t->tid, SIGSETXID);
-         if (INTERNAL_SYSCALL_ERROR_P (val, err)
-             && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
-# endif
-           val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
-#endif
+         struct pthread *t = list_entry (runp, struct pthread, list);
+         if (t == self)
+           continue;
+
+         signalled += setxid_signal_thread (cmdp, t);
+       }
 
-         if (!INTERNAL_SYSCALL_ERROR_P (val, err))
-           atomic_increment (&cmdp->cntr);
+      list_for_each (runp, &__stack_user)
+       {
+         struct pthread *t = list_entry (runp, struct pthread, list);
+         if (t == self)
+           continue;
+
+         signalled += setxid_signal_thread (cmdp, t);
+       }
+
+      int cur = cmdp->cntr;
+      while (cur != 0)
+       {
+         lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
+         cur = cmdp->cntr;
        }
     }
+  while (signalled != 0);
 
-  int cur = cmdp->cntr;
-  while (cur != 0)
+  /* Clean up flags, so that no thread blocks during exit waiting
+     for a signal which will never come.  */
+  list_for_each (runp, &stack_used)
     {
-      lll_futex_wait (&cmdp->cntr, cur);
-      cur = cmdp->cntr;
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self)
+       continue;
+
+      setxid_unmark_thread (cmdp, t);
+    }
+
+  list_for_each (runp, &__stack_user)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self)
+       continue;
+
+      setxid_unmark_thread (cmdp, t);
     }
 
   /* This must be last, otherwise the current thread might not have
      permissions to send SIGSETXID syscall to the other threads.  */
+  INTERNAL_SYSCALL_DECL (err);
   result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
   if (INTERNAL_SYSCALL_ERROR_P (result, err))
@@ -881,7 +1115,7 @@ __nptl_setxid (struct xid_command *cmdp)
       result = -1;
     }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
   return result;
 }
 
@@ -910,7 +1144,7 @@ void
 attribute_hidden
 __pthread_init_static_tls (struct link_map *map)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -921,5 +1155,62 @@ __pthread_init_static_tls (struct link_map *map)
   list_for_each (runp, &__stack_user)
     init_one_static_tls (list_entry (runp, struct pthread, list), map);
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
+}
+
+
+void
+attribute_hidden
+__wait_lookup_done (void)
+{
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
+
+  struct pthread *self = THREAD_SELF;
+
+  /* Iterate over the list with system-allocated threads first.  */
+  list_t *runp;
+  list_for_each (runp, &stack_used)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
+       continue;
+
+      int *const gscope_flagp = &t->header.gscope_flag;
+
+      /* We have to wait until this thread is done with the global
+        scope.  First tell the thread that we are waiting and
+        possibly have to be woken.  */
+      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
+                                               THREAD_GSCOPE_FLAG_WAIT,
+                                               THREAD_GSCOPE_FLAG_USED))
+       continue;
+
+      do
+       lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
+      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
+    }
+
+  /* Now the list with threads using user-allocated stacks.  */
+  list_for_each (runp, &__stack_user)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
+       continue;
+
+      int *const gscope_flagp = &t->header.gscope_flag;
+
+      /* We have to wait until this thread is done with the global
+        scope.  First tell the thread that we are waiting and
+        possibly have to be woken.  */
+      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
+                                               THREAD_GSCOPE_FLAG_WAIT,
+                                               THREAD_GSCOPE_FLAG_USED))
+       continue;
+
+      do
+       lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
+      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
+    }
+
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
index 1d28d38..eac7973 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -70,14 +70,13 @@ __pthread_disable_asynccancel (int oldtype)
     return;
 
   struct pthread *self = THREAD_SELF;
+  int newval;
+
   int oldval = THREAD_GETMEM (self, cancelhandling);
 
   while (1)
     {
-      int newval = oldval & ~CANCELTYPE_BITMASK;
-
-      if (newval == oldval)
-       break;
+      newval = oldval & ~CANCELTYPE_BITMASK;
 
       int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
@@ -87,4 +86,15 @@ __pthread_disable_asynccancel (int oldtype)
       /* Prepare the next round.  */
       oldval = curval;
     }
+
+  /* We cannot return when we are being canceled.  Upon return there
+     might be things which would have to be undone.  The
+     following loop should loop until the cancellation signal is
+     delivered.  */
+  while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
+                          == CANCELING_BITMASK, 0))
+    {
+      lll_futex_wait (&self->cancelhandling, newval, LLL_PRIVATE);
+      newval = THREAD_GETMEM (self, cancelhandling);
+    }
 }
index 1a8d91b..c355eae 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -36,6 +36,7 @@
 #endif
 #define __need_res_state
 #include <resolv.h>
+#include <bits/kernel-features.h>
 
 #ifndef TCB_ALIGNMENT
 # define TCB_ALIGNMENT sizeof (double)
@@ -101,6 +102,23 @@ struct xid_command
 };
 
 
+/* Data structure used by the kernel to find robust futexes.  */
+struct robust_list_head
+{
+  void *list;
+  long int futex_offset;
+  void *list_op_pending;
+};
+
+
+/* Data structure used to handle thread priority protection.  */
+struct priority_protection_data
+{
+  int priomax;
+  unsigned int priomap[];
+};
+
+
 /* Thread descriptor data structure.  */
 struct pthread
 {
@@ -113,6 +131,10 @@ struct pthread
     struct
     {
       int multiple_threads;
+      int gscope_flag;
+# ifndef __ASSUME_PRIVATE_FUTEX
+      int private_futex;
+# endif
     } header;
 #endif
 
@@ -120,7 +142,7 @@ struct pthread
        is private and subject to change without affecting the official ABI.
        We just have it here in case it might be convenient for some
        implementation-specific instrumentation hack or suchlike.  */
-    void *__padding[16];
+    void *__padding[24];
   };
 
   /* This descriptor's link on the `stack_used' or `__stack_user' list.  */
@@ -133,6 +155,82 @@ struct pthread
   /* Process ID - thread group ID in kernel speak.  */
   pid_t pid;
 
+  /* List of robust mutexes the thread is holding.  */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  void *robust_prev;
+  struct robust_list_head robust_head;
+
+  /* The list above is strange.  It is basically a double linked list
+     but the pointer to the next/previous element of the list points
+     in the middle of the object, the __next element.  Whenever
+     casting to __pthread_list_t we need to adjust the pointer
+     first.  */
+# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
+
+# define ENQUEUE_MUTEX_BOTH(mutex, val)                                              \
+  do {                                                                       \
+    __pthread_list_t *next = (__pthread_list_t *)                            \
+      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul)   \
+       - QUEUE_PTR_ADJUST);                                                  \
+    next->__prev = (void *) &mutex->__data.__list.__next;                    \
+    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF,                \
+                                                robust_head.list);           \
+    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head;        \
+    THREAD_SETMEM (THREAD_SELF, robust_head.list,                            \
+                  (void *) (((uintptr_t) &mutex->__data.__list.__next)       \
+                            | val));                                         \
+  } while (0)
+# define DEQUEUE_MUTEX(mutex) \
+  do {                                                                       \
+    __pthread_list_t *next = (__pthread_list_t *)                            \
+      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul)           \
+       - QUEUE_PTR_ADJUST);                                                  \
+    next->__prev = mutex->__data.__list.__prev;                                      \
+    __pthread_list_t *prev = (__pthread_list_t *)                            \
+      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul)           \
+       - QUEUE_PTR_ADJUST);                                                  \
+    prev->__next = mutex->__data.__list.__next;                                      \
+    mutex->__data.__list.__prev = NULL;                                              \
+    mutex->__data.__list.__next = NULL;                                              \
+  } while (0)
+#else
+  union
+  {
+    __pthread_slist_t robust_list;
+    struct robust_list_head robust_head;
+  };
+
+# define ENQUEUE_MUTEX_BOTH(mutex, val)                                              \
+  do {                                                                       \
+    mutex->__data.__list.__next                                                      \
+      = THREAD_GETMEM (THREAD_SELF, robust_list.__next);                     \
+    THREAD_SETMEM (THREAD_SELF, robust_list.__next,                          \
+                  (void *) (((uintptr_t) &mutex->__data.__list) | val));     \
+  } while (0)
+# define DEQUEUE_MUTEX(mutex) \
+  do {                                                                       \
+    __pthread_slist_t *runp = (__pthread_slist_t *)                          \
+      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
+    if (runp == &mutex->__data.__list)                                       \
+      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next);         \
+    else                                                                     \
+      {                                                                              \
+       __pthread_slist_t *next = (__pthread_slist_t *)               \
+         (((uintptr_t) runp->__next) & ~1ul);                                \
+       while (next != &mutex->__data.__list)                                 \
+         {                                                                   \
+           runp = next;                                                      \
+           next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
+         }                                                                   \
+                                                                             \
+       runp->__next = next->__next;                                          \
+       mutex->__data.__list.__next = NULL;                                   \
+      }                                                                              \
+  } while (0)
+#endif
+#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
+#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)
+
   /* List of cleanup buffers.  */
   struct _pthread_cleanup_buffer *cleanup;
 
@@ -144,25 +242,25 @@ struct pthread
   int cancelhandling;
   /* Bit set if cancellation is disabled.  */
 #define CANCELSTATE_BIT                0
-#define CANCELSTATE_BITMASK    0x01
+#define CANCELSTATE_BITMASK    (0x01 << CANCELSTATE_BIT)
   /* Bit set if asynchronous cancellation mode is selected.  */
 #define CANCELTYPE_BIT         1
-#define CANCELTYPE_BITMASK     0x02
+#define CANCELTYPE_BITMASK     (0x01 << CANCELTYPE_BIT)
   /* Bit set if canceling has been initiated.  */
 #define CANCELING_BIT          2
-#define CANCELING_BITMASK      0x04
+#define CANCELING_BITMASK      (0x01 << CANCELING_BIT)
   /* Bit set if canceled.  */
 #define CANCELED_BIT           3
-#define CANCELED_BITMASK       0x08
+#define CANCELED_BITMASK       (0x01 << CANCELED_BIT)
   /* Bit set if thread is exiting.  */
 #define EXITING_BIT            4
-#define EXITING_BITMASK                0x10
+#define EXITING_BITMASK                (0x01 << EXITING_BIT)
   /* Bit set if thread terminated and TCB is freed.  */
 #define TERMINATED_BIT         5
-#define TERMINATED_BITMASK     0x20
+#define TERMINATED_BITMASK     (0x01 << TERMINATED_BIT)
   /* Bit set if thread is supposed to change XID.  */
 #define SETXID_BIT             6
-#define SETXID_BITMASK         0x40
+#define SETXID_BITMASK         (0x01 << SETXID_BIT)
   /* Mask for the rest.  Helps the compiler to optimize.  */
 #define CANCEL_RESTMASK                0xffffff80
 
@@ -174,6 +272,9 @@ struct pthread
               | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK))     \
    == (CANCELTYPE_BITMASK | CANCELED_BITMASK))
 
+  /* Flags.  Including those copied from the thread attribute.  */
+  int flags;
+
   /* We allocate one block of references here.  This should be enough
      to avoid allocating any memory dynamically for most applications.  */
   struct pthread_key_data
@@ -187,12 +288,12 @@ struct pthread
     void *data;
   } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];
 
-  /* Flag which is set when specific data is set.  */
-  bool specific_used;
-
   /* Two-level array for the thread-specific data.  */
   struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];
 
+  /* Flag which is set when specific data is set.  */
+  bool specific_used;
+
   /* True if events must be reported.  */
   bool report_events;
 
@@ -202,11 +303,15 @@ struct pthread
   /* True if thread must stop at startup time.  */
   bool stopped_start;
 
+  /* The parent's cancel handling at the time of the pthread_create
+     call.  This might be needed to undo the effects of a cancellation.  */
+  int parent_cancelhandling;
+
   /* Lock to synchronize access to the descriptor.  */
-  lll_lock_t lock;
+  int lock;
 
   /* Lock for synchronizing setxid calls.  */
-  lll_lock_t setxid_futex;
+  int setxid_futex;
 
 #if HP_TIMING_AVAIL
   /* Offset of the CPU clock at start thread start time.  */
@@ -223,9 +328,6 @@ struct pthread
   /* Check whether a thread is detached.  */
 #define IS_DETACHED(pd) ((pd)->joinid == (pd))
 
-  /* Flags.  Including those copied from the thread attribute.  */
-  int flags;
-
   /* The result of the thread function.  */
   void *result;
 
@@ -257,6 +359,9 @@ struct pthread
   /* This is what the user specified and what we will report.  */
   size_t reported_guardsize;
 
+  /* Thread Priority Protection data.  */
+  struct priority_protection_data *tpp;
+
   /* Resolver state.  */
   struct __res_state res;
 
index 5b71f8e..4c5ada3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <pthreadP.h>
 #include <signal.h>
 #include <stdlib.h>
+
 #include <atomic.h>
 #include <sysdep.h>
 
 
 /* Pointers to the libc functions.  */
 struct pthread_functions __libc_pthread_functions attribute_hidden;
+int __libc_pthread_functions_init attribute_hidden;
 
 
 #define FORWARD2(name, rettype, decl, params, defaction) \
 rettype                                                                              \
 name decl                                                                    \
 {                                                                            \
-  if (__libc_pthread_functions.ptr_##name == NULL)                           \
+  if (!__libc_pthread_functions_init)                                        \
     defaction;                                                               \
                                                                              \
-  return __libc_pthread_functions.ptr_##name params;                         \
+  return PTHFCT_CALL (ptr_##name, params);                                   \
 }
 
 #define FORWARD(name, decl, params, defretval) \
@@ -123,34 +125,13 @@ FORWARD (pthread_setschedparam,
 
 FORWARD (pthread_mutex_destroy, (pthread_mutex_t *mutex), (mutex), 0)
 
-libc_hidden_proto(pthread_mutex_init)
 FORWARD (pthread_mutex_init,
         (pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr),
         (mutex, mutexattr), 0)
-strong_alias(pthread_mutex_init, __pthread_mutex_init)
-libc_hidden_def(pthread_mutex_init)
-
-libc_hidden_proto(pthread_mutex_trylock)
-FORWARD (pthread_mutex_trylock, (pthread_mutex_t *mutex), (mutex), 0)
-strong_alias(pthread_mutex_trylock, __pthread_mutex_trylock)
-libc_hidden_def(pthread_mutex_trylock)
 
-libc_hidden_proto(pthread_mutex_lock)
 FORWARD (pthread_mutex_lock, (pthread_mutex_t *mutex), (mutex), 0)
-strong_alias(pthread_mutex_lock, __pthread_mutex_lock)
-libc_hidden_def(pthread_mutex_lock)
 
-libc_hidden_proto(pthread_mutex_unlock)
 FORWARD (pthread_mutex_unlock, (pthread_mutex_t *mutex), (mutex), 0)
-strong_alias(pthread_mutex_unlock, __pthread_mutex_unlock)
-libc_hidden_def(pthread_mutex_unlock)
-
-FORWARD (pthread_mutexattr_init, (pthread_mutexattr_t *attr), (attr), 0)
-
-FORWARD (pthread_mutexattr_destroy, (pthread_mutexattr_t *attr), (attr), 0)
-
-FORWARD (pthread_mutexattr_settype, (pthread_mutexattr_t *attr, int kind),
-                                   (attr, kind), 0)
 
 
 FORWARD2 (pthread_self, pthread_t, (void), (), return 0)
@@ -163,7 +144,8 @@ FORWARD (pthread_setcanceltype, (int type, int *oldtype), (type, oldtype), 0)
 
 #define return /* value is void */
 FORWARD2(__pthread_unwind,
-        void attribute_hidden __attribute ((noreturn)) __cleanup_fct_attribute,
+        void attribute_hidden __attribute ((noreturn)) __cleanup_fct_attribute
+        attribute_compat_text_section,
         (__pthread_unwind_buf_t *buf), (buf), {
                       /* We cannot call abort() here.  */
                       INTERNAL_SYSCALL_DECL (err);
index 0c55bbb..765dc5d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -18,6 +18,7 @@
    02111-1307 USA.  */
 
 #include <assert.h>
+#include <errno.h>
 #include <limits.h>
 #include <signal.h>
 #include <stdlib.h>
 #include <ldsodefs.h>
 #include <tls.h>
 #include <fork.h>
+#include <version.h>
 #include <smp.h>
 #include <lowlevellock.h>
-#include <version.h>
-
-
-#ifndef __NR_set_tid_address
-/* XXX For the time being...  Once we can rely on the kernel headers
-   having the definition remove these lines.  */
-#if defined __s390__
-# define __NR_set_tid_address  252
-#elif defined __ia64__
-# define __NR_set_tid_address  1233
-#elif defined __i386__
-# define __NR_set_tid_address  258
-#elif defined __x86_64__
-# define __NR_set_tid_address  218
-#elif defined __powerpc__
-# define __NR_set_tid_address  232
-#elif defined __sparc__
-# define __NR_set_tid_address  166
-#else
-# error "define __NR_set_tid_address"
-#endif
-#endif
+#include <bits/kernel-features.h>
 
 
 /* Size and alignment of static TLS block.  */
 size_t __static_tls_size;
 size_t __static_tls_align_m1;
 
+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and so can't use it.  */
+int __set_robust_list_avail;
+# define set_robust_list_not_avail() \
+  __set_robust_list_avail = -1
+#else
+# define set_robust_list_not_avail() do { } while (0)
+#endif
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+/* Nonzero if we do have FUTEX_CLOCK_REALTIME.  */
+int __have_futex_clock_realtime;
+# define __set_futex_clock_realtime() \
+  __have_futex_clock_realtime = 1
+#else
+#define __set_futex_clock_realtime() do { } while (0)
+#endif
+
 /* Version of the library, used in libthread_db to detect mismatches.  */
 static const char nptl_version[] __attribute_used__ = VERSION;
 
 
-#if defined USE_TLS && !defined SHARED
+#ifndef SHARED
 extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
 #endif
 
-int
-__libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact);
+#ifdef SHARED
+static void nptl_freeres (void);
 
 
-#ifdef SHARED
 static const struct pthread_functions pthread_functions =
   {
     .ptr_pthread_attr_destroy = __pthread_attr_destroy,
@@ -98,10 +96,10 @@ static const struct pthread_functions pthread_functions =
     .ptr___pthread_exit = __pthread_exit,
     .ptr_pthread_getschedparam = __pthread_getschedparam,
     .ptr_pthread_setschedparam = __pthread_setschedparam,
-    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
-    .ptr_pthread_mutex_init = __pthread_mutex_init,
-    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
-    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
+    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
+    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
+    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
+    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
     .ptr_pthread_self = __pthread_self,
     .ptr_pthread_setcancelstate = __pthread_setcancelstate,
     .ptr_pthread_setcanceltype = __pthread_setcanceltype,
@@ -118,7 +116,9 @@ static const struct pthread_functions pthread_functions =
     .ptr_nthreads = &__nptl_nthreads,
     .ptr___pthread_unwind = &__pthread_unwind,
     .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
-    .ptr__nptl_setxid = __nptl_setxid
+    .ptr__nptl_setxid = __nptl_setxid,
+    /* For now only the stack cache needs to be freed.  */
+    .ptr_freeres = nptl_freeres
   };
 # define ptr_pthread_functions &pthread_functions
 #else
@@ -126,10 +126,30 @@ static const struct pthread_functions pthread_functions =
 #endif
 
 
+#ifdef SHARED
+/* This function is called indirectly from the freeres code in libc.  */
+static void
+__libc_freeres_fn_section
+nptl_freeres (void)
+{
+  __unwind_freeres ();
+  __free_stacks (0);
+}
+#endif
+
+
 /* For asynchronous cancellation we use a signal.  This is the handler.  */
 static void
 sigcancel_handler (int sig, siginfo_t *si, void *ctx)
 {
+#ifdef __ASSUME_CORRECT_SI_PID
+  /* Determine the process ID.  It might be negative if the thread is
+     in the middle of a fork() call.  */
+  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
+  if (__builtin_expect (pid < 0, 0))
+    pid = -pid;
+#endif
+
   /* Safety check.  It would be possible to call this function for
      other signals and send a signal from another process.  This is not
      correct and might even be a security problem.  Try to catch as
@@ -138,7 +158,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
 #ifdef __ASSUME_CORRECT_SI_PID
       /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid so we skip this test.  */
-      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
+      || si->si_pid != pid
 #endif
       || si->si_code != SI_TKILL)
     return;
@@ -183,6 +203,14 @@ struct xid_command *__xidcmd attribute_hidden;
 static void
 sighandler_setxid (int sig, siginfo_t *si, void *ctx)
 {
+#ifdef __ASSUME_CORRECT_SI_PID
+  /* Determine the process ID.  It might be negative if the thread is
+     in the middle of a fork() call.  */
+  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
+  if (__builtin_expect (pid < 0, 0))
+    pid = -pid;
+#endif
+
   /* Safety check.  It would be possible to call this function for
      other signals and send a signal from another process.  This is not
      correct and might even be a security problem.  Try to catch as
@@ -191,7 +219,7 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
 #ifdef __ASSUME_CORRECT_SI_PID
       /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid so we skip this test.  */
-      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
+      || si->si_pid != pid
 #endif
       || si->si_code != SI_TKILL)
     return;
@@ -200,8 +228,23 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
   INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);
 
+  /* Reset the SETXID flag.  */
+  struct pthread *self = THREAD_SELF;
+  int flags, newval;
+  do
+    {
+      flags = THREAD_GETMEM (self, cancelhandling);
+      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
+                                         flags & ~SETXID_BITMASK, flags);
+    }
+  while (flags != newval);
+
+  /* And release the futex.  */
+  self->setxid_futex = 1;
+  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);
+
   if (atomic_decrement_val (&__xidcmd->cntr) == 0)
-    lll_futex_wake (&__xidcmd->cntr, 1);
+    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
 }
 
 
@@ -210,6 +253,9 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
 extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
 
 
+/* This can be set by the debugger before initialization is complete.  */
+static bool __nptl_initial_report_events __attribute_used__;
+
 void
 __pthread_initialize_minimal_internal (void)
 {
@@ -237,6 +283,55 @@ __pthread_initialize_minimal_internal (void)
   THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
 #endif
 
+  /* Initialize the robust mutex data.  */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_prev = &pd->robust_head;
+#endif
+  pd->robust_head.list = &pd->robust_head;
+#ifdef __NR_set_robust_list
+  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+                                 - offsetof (pthread_mutex_t,
+                                             __data.__list.__next));
+  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+                             sizeof (struct robust_list_head));
+  if (INTERNAL_SYSCALL_ERROR_P (res, err))
+#endif
+    set_robust_list_not_avail ();
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+  /* Private futexes are always used (at least internally) so that
+     doing the test once this early is beneficial.  */
+  {
+    int word = 0;
+    word = INTERNAL_SYSCALL (futex, err, 3, &word,
+                           FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
+    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
+      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
+  }
+
+  /* Private futexes have been introduced earlier than the
+     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
+     know the former are not supported.  This also means we know the
+     kernel will return ENOSYS for unknown operations.  */
+  if (THREAD_GETMEM (pd, header.private_futex) != 0)
+#endif
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+    {
+      int word = 0;
+      /* NB: the syscall actually takes six parameters.  The last is the
+        bit mask.  But since we will not actually wait at all the value
+        is irrelevant.  Given that passing six parameters is difficult
+        on some architectures we just pass whatever random value the
+        calling convention calls for to the kernel.  It causes no harm.  */
+      word = INTERNAL_SYSCALL (futex, err, 5, &word,
+                              FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
+                              | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
+      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
+      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
+       __set_futex_clock_realtime ();
+    }
+#endif
+
   /* Set initial thread's stack block from 0 up to __libc_stack_end.
      It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
      purposes this is good enough.  */
@@ -246,6 +341,9 @@ __pthread_initialize_minimal_internal (void)
   INIT_LIST_HEAD (&__stack_user);
   list_add (&pd->list, &__stack_user);
 
+  /* Before initializing __stack_user, the debugger could not find us and
+     had to set __nptl_initial_report_events.  Propagate its setting.  */
+  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
 
   /* Install the cancellation signal handler.  If for some reason we
      cannot install the handler we do not abort.  Maybe we should, but
@@ -311,6 +409,15 @@ __pthread_initialize_minimal_internal (void)
   /* Transfer the old value from the dynamic linker's internal location.  */
   *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
   GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
+
+  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
+     keep the lock count from the ld.so implementation.  */
+  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
+  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
+  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
+  GL(dl_load_lock).mutex.__data.__count = 0;
+  while (rtld_lock_count-- > 0)
+    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);
 #endif
 
   GL(dl_init_static_tls) = &__pthread_init_static_tls;
index c9237e0..e9334a4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
-#include <setjmp.h>
-#include <stdlib.h>
 #include "pthreadP.h"
-#include "atomic.h"
-#include <bits/libc-lock.h>
 
 
-#ifndef NOT_IN_libc
-
-/* The next two functions are similar to pthread_setcanceltype() but
-   more specialized for the use in the cancelable functions like write().
-   They do not need to check parameters etc.  */
-int
-attribute_hidden
-__libc_enable_asynccancel (void)
-{
-  struct pthread *self = THREAD_SELF;
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-
-  while (1)
-    {
-      int newval = oldval | CANCELTYPE_BITMASK;
-
-      if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
-       {
-         /* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
-            stop right here.  */
-         if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
-           break;
-
-         int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-                                                 newval, oldval);
-         if (__builtin_expect (curval != oldval, 0))
-           {
-             /* Somebody else modified the word, try again.  */
-             oldval = curval;
-             continue;
-           }
-
-         THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-
-         __do_cancel ();
-
-         /* NOTREACHED */
-       }
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-                                             oldval);
-      if (__builtin_expect (curval == oldval, 1))
-       break;
-
-      /* Prepare the next round.  */
-      oldval = curval;
-    }
-
-  return oldval;
-}
-
-
-void
-internal_function attribute_hidden
-__libc_disable_asynccancel (int oldtype)
-{
-  /* If asynchronous cancellation was enabled before we do not have
-     anything to do.  */
-  if (oldtype & CANCELTYPE_BITMASK)
-    return;
-
-  struct pthread *self = THREAD_SELF;
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-
-  while (1)
-    {
-      int newval = oldval & ~CANCELTYPE_BITMASK;
-
-      if (newval == oldval)
-       break;
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-                                             oldval);
-      if (__builtin_expect (curval == oldval, 1))
-       break;
-
-      /* Prepare the next round.  */
-      oldval = curval;
-    }
-}
-
-
-void
-__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
-{
-  if (f->__do_it)
-    f->__cancel_routine (f->__cancel_arg);
-}
-
-#endif
+#define __pthread_enable_asynccancel __libc_enable_asynccancel
+#define __pthread_disable_asynccancel __libc_disable_asynccancel
+#include "cancellation.c"
index f5fdede..f72ea26 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -20,7 +20,7 @@
 #include <setjmp.h>
 #include <stdlib.h>
 #include "pthreadP.h"
-#include "jmpbuf-unwind.h"
+#include <jmpbuf-unwind.h>
 
 void
 __pthread_cleanup_upto (__jmp_buf target, char *targetframe)
index 09a08ec..4728a7c 100644 (file)
 #include <sysdep.h>
 #include "pthreadP.h"
 
-
 extern __typeof(system) __libc_system;
 #include <system.c>
 
+
 int
 system (const char *line)
 {
index 2bb4d0d..0975b7a 100644 (file)
@@ -8,5 +8,6 @@ EDEADLK         EDEADLK
 EINTR          EINTR
 EINVAL         EINVAL
 ENOSYS         ENOSYS
+EOVERFLOW      EOVERFLOW
 ETIMEDOUT      ETIMEDOUT
 EWOULDBLOCK    EWOULDBLOCK
index 149c505..8124c66 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -31,6 +31,7 @@
 #include <internaltypes.h>
 #include <pthread-functions.h>
 #include <atomic.h>
+#include <bits/kernel-features.h>
 
 
 /* Atomic operations on TLS memory.  */
 #endif
 
 
+/* Magic cookie representing robust mutex with dead owner.  */
+#define PTHREAD_MUTEX_INCONSISTENT     INT_MAX
+/* Magic cookie representing not recoverable robust mutex.  */
+#define PTHREAD_MUTEX_NOTRECOVERABLE   (INT_MAX - 1)
+
+
+/* Internal mutex type value.  */
+enum
+{
+  PTHREAD_MUTEX_KIND_MASK_NP = 3,
+  PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
+  PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
+  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+  PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
+  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+  PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
+  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+  PTHREAD_MUTEX_PRIO_INHERIT_NP = 32,
+  PTHREAD_MUTEX_PI_NORMAL_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL,
+  PTHREAD_MUTEX_PI_RECURSIVE_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+  PTHREAD_MUTEX_PI_ERRORCHECK_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+  PTHREAD_MUTEX_PI_ADAPTIVE_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+  PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP,
+  PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
+  PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP,
+  PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
+  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP,
+  PTHREAD_MUTEX_PRIO_PROTECT_NP = 64,
+  PTHREAD_MUTEX_PP_NORMAL_NP
+  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL,
+  PTHREAD_MUTEX_PP_RECURSIVE_NP
+  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+  PTHREAD_MUTEX_PP_ERRORCHECK_NP
+  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+  PTHREAD_MUTEX_PP_ADAPTIVE_NP
+  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP
+};
+#define PTHREAD_MUTEX_PSHARED_BIT 128
+
+#define PTHREAD_MUTEX_TYPE(m) \
+  ((m)->__data.__kind & 127)
+
+#if LLL_PRIVATE == 0 && LLL_SHARED == 128
+# define PTHREAD_MUTEX_PSHARED(m) \
+  ((m)->__data.__kind & 128)
+#else
+# define PTHREAD_MUTEX_PSHARED(m) \
+  (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
+#endif
+
+/* The kernel when waking robust mutexes on exit never uses
+   FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
+#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED
+
+/* Ceiling in __data.__lock.  __data.__lock is signed, so don't
+   use the MSB bit in there, but in the mask also include that bit,
+   so that the compiler can optimize & PTHREAD_MUTEX_PRIO_CEILING_MASK
+   masking if the value is then shifted down by
+   PTHREAD_MUTEX_PRIO_CEILING_SHIFT.  */
+#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT       19
+#define PTHREAD_MUTEX_PRIO_CEILING_MASK                0xfff80000
+
+
+/* Flags in mutex attr.  */
+#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT       28
+#define PTHREAD_MUTEXATTR_PROTOCOL_MASK                0x30000000
+#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT   12
+#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK    0x00fff000
+#define PTHREAD_MUTEXATTR_FLAG_ROBUST          0x40000000
+#define PTHREAD_MUTEXATTR_FLAG_PSHARED         0x80000000
+#define PTHREAD_MUTEXATTR_FLAG_BITS \
+  (PTHREAD_MUTEXATTR_FLAG_ROBUST | PTHREAD_MUTEXATTR_FLAG_PSHARED \
+   | PTHREAD_MUTEXATTR_PROTOCOL_MASK | PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+
+
+/* Check whether rwlock prefers readers.   */
+#define PTHREAD_RWLOCK_PREFER_READER_P(rwlock) \
+  ((rwlock)->__data.__flags == 0)
+
+
+/* Bits used in robust mutex implementation.  */
+#define FUTEX_WAITERS          0x80000000
+#define FUTEX_OWNER_DIED       0x40000000
+#define FUTEX_TID_MASK         0x3fffffff
+
+
 /* Internal variables.  */
 
 
@@ -70,7 +164,7 @@ hidden_proto (__stack_user)
 
 /* Attribute handling.  */
 extern struct pthread_attr *__attr_list attribute_hidden;
-extern lll_lock_t __attr_list_lock attribute_hidden;
+extern int __attr_list_lock attribute_hidden;
 
 /* First available RT signal.  */
 extern int __current_sigrtmin attribute_hidden;
@@ -87,6 +181,19 @@ hidden_proto (__pthread_keys)
 /* Number of threads running.  */
 extern unsigned int __nptl_nthreads attribute_hidden;
 
+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and we can use it.  */
+extern int __set_robust_list_avail attribute_hidden;
+#endif
+
+/* Thread Priority Protection.  */
+extern int __sched_fifo_min_prio attribute_hidden;
+extern int __sched_fifo_max_prio attribute_hidden;
+extern void __init_sched_fifo_prio (void) attribute_hidden;
+extern int __pthread_tpp_change_priority (int prev_prio, int new_prio)
+     attribute_hidden;
+extern int __pthread_current_priority (void) attribute_hidden;
+
 /* The library can run in debugging mode where it performs a lot more
    tests.  */
 extern int __pthread_debug attribute_hidden;
@@ -108,8 +215,8 @@ extern int __pthread_debug attribute_hidden;
 /* Cancellation test.  */
 #define CANCELLATION_P(self) \
   do {                                                                       \
-    int _cancelhandling = THREAD_GETMEM (self, cancelhandling);                      \
-    if (CANCEL_ENABLED_AND_CANCELED (_cancelhandling))                       \
+    int cancelhandling = THREAD_GETMEM (self, cancelhandling);               \
+    if (CANCEL_ENABLED_AND_CANCELED (cancelhandling))                        \
       {                                                                              \
        THREAD_SETMEM (self, result, PTHREAD_CANCELED);                       \
        __do_cancel ();                                                       \
@@ -140,6 +247,7 @@ hidden_proto (__pthread_register_cancel)
 hidden_proto (__pthread_unregister_cancel)
 # ifdef SHARED
 extern void attribute_hidden pthread_cancel_init (void);
+extern void __unwind_freeres (void);
 # endif
 #endif
 
@@ -174,22 +282,22 @@ __do_cancel (void)
 # define LIBC_CANCEL_RESET(oldtype) \
   __libc_disable_asynccancel (oldtype)
 # define LIBC_CANCEL_HANDLED() \
-  __asm (".globl " __USER_LABEL_PREFIX__ "__libc_enable_asynccancel"); \
-  __asm (".globl " __USER_LABEL_PREFIX__ "__libc_disable_asynccancel")
+  __asm__ (".globl " __USER_LABEL_PREFIX__ "__libc_enable_asynccancel"); \
+  __asm__ (".globl " __USER_LABEL_PREFIX__ "__libc_disable_asynccancel")
 #elif defined NOT_IN_libc && defined IS_IN_libpthread
 # define LIBC_CANCEL_ASYNC() CANCEL_ASYNC ()
 # define LIBC_CANCEL_RESET(val) CANCEL_RESET (val)
 # define LIBC_CANCEL_HANDLED() \
-  __asm (".globl " __USER_LABEL_PREFIX__ "__pthread_enable_asynccancel"); \
-  __asm (".globl " __USER_LABEL_PREFIX__ "__pthread_disable_asynccancel")
+  __asm__ (".globl " __USER_LABEL_PREFIX__ "__pthread_enable_asynccancel"); \
+  __asm__ (".globl " __USER_LABEL_PREFIX__ "__pthread_disable_asynccancel")
 #elif defined NOT_IN_libc && defined IS_IN_librt
 # define LIBC_CANCEL_ASYNC() \
   __librt_enable_asynccancel ()
 # define LIBC_CANCEL_RESET(val) \
   __librt_disable_asynccancel (val)
 # define LIBC_CANCEL_HANDLED() \
-  __asm (".globl " __USER_LABEL_PREFIX__ "__librt_enable_asynccancel"); \
-  __asm (".globl " __USER_LABEL_PREFIX__ "__librt_disable_asynccancel")
+  __asm__ (".globl " __USER_LABEL_PREFIX__ "__librt_enable_asynccancel"); \
+  __asm__ (".globl " __USER_LABEL_PREFIX__ "__librt_disable_asynccancel")
 #else
 # define LIBC_CANCEL_ASYNC()   0 /* Just a dummy value.  */
 # define LIBC_CANCEL_RESET(val)        ((void)(val)) /* Nothing, but evaluate it.  */
@@ -263,11 +371,13 @@ hidden_proto (__nptl_death_event)
 #ifdef TLS_MULTIPLE_THREADS_IN_TCB
 extern void __libc_pthread_init (unsigned long int *ptr,
                                 void (*reclaim) (void),
-                                const struct pthread_functions *functions);
+                                const struct pthread_functions *functions)
+     internal_function;
 #else
 extern int *__libc_pthread_init (unsigned long int *ptr,
                                 void (*reclaim) (void),
-                                const struct pthread_functions *functions);
+                                const struct pthread_functions *functions)
+     internal_function;
 
 /* Variable set to a nonzero value if more than one thread runs or ran.  */
 extern int __pthread_multiple_threads attribute_hidden;
@@ -307,6 +417,7 @@ extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
 extern int __pthread_mutex_lock_internal (pthread_mutex_t *__mutex)
      attribute_hidden;
 extern int __pthread_mutex_cond_lock (pthread_mutex_t *__mutex);
+extern void __pthread_mutex_cond_lock_adjust (pthread_mutex_t *__mutex);
 extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
 extern int __pthread_mutex_unlock_internal (pthread_mutex_t *__mutex)
      attribute_hidden;
@@ -454,10 +565,25 @@ extern void __nptl_deallocate_tsd (void) attribute_hidden;
 
 extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
 
+extern void __free_stacks (size_t limit) attribute_hidden;
+
+extern void __wait_lookup_done (void) attribute_hidden;
+
 #ifdef SHARED
 # define PTHREAD_STATIC_FN_REQUIRE(name)
 #else
-# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
+# define PTHREAD_STATIC_FN_REQUIRE(name) __asm__ (".globl " #name);
+#endif
+
+
+#ifndef __NR_set_robust_list
+/* XXX For the time being...  Once we can rely on the kernel headers
+   having the definition remove these lines.  */
+# if defined __i386__
+#  define __NR_set_robust_list  311
+# elif defined __x86_64__
+#  define __NR_set_robust_list  273
+# endif
 #endif
 
 #endif /* pthreadP.h */
index 5e9b352..a11d568 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -38,7 +38,8 @@
 #include <fork.h>
 
 /* This is defined by newer gcc version unique for each module.  */
-extern void *__dso_handle __attribute__ ((__weak__));
+extern void *__dso_handle __attribute__ ((__weak__,
+                                         __visibility__ ("hidden")));
 
 
 /* Hide the symbol so that no definition but the one locally in the
@@ -52,4 +53,4 @@ __pthread_atfork (
   return __register_atfork (prepare, parent, child,
                            &__dso_handle == NULL ? NULL : __dso_handle);
 }
-strong_alias(__pthread_atfork, pthread_atfork)
+strong_alias (__pthread_atfork, pthread_atfork)
index f9e446a..b8e6a37 100644 (file)
@@ -24,7 +24,8 @@
 #include "pthreadP.h"
 
 int
-__pthread_attr_destroy (pthread_attr_t *attr)
+__pthread_attr_destroy (
+     pthread_attr_t *attr)
 {
   struct pthread_attr *iattr;
 
index db0720e..5f549ba 100644 (file)
@@ -22,7 +22,9 @@
 
 
 int
-__pthread_attr_getdetachstate (const pthread_attr_t *attr, int *detachstate)
+__pthread_attr_getdetachstate (
+     const pthread_attr_t *attr,
+     int *detachstate)
 {
   struct pthread_attr *iattr;
 
index 161282c..65ce4e5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 struct pthread_attr *__attr_list;
-lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER;
+int __attr_list_lock = LLL_LOCK_INITIALIZER;
 
 
 int
-__pthread_attr_init_2_1 (pthread_attr_t *attr)
+__pthread_attr_init_2_1 (
+     pthread_attr_t *attr)
 {
   struct pthread_attr *iattr;
 
index 137fbe7..741e641 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2004, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -31,6 +31,12 @@ __pthread_attr_setschedparam (
   assert (sizeof (*attr) >= sizeof (struct pthread_attr));
   struct pthread_attr *iattr = (struct pthread_attr *) attr;
 
+  int min = sched_get_priority_min (iattr->schedpolicy);
+  int max = sched_get_priority_max (iattr->schedpolicy);
+  if (min == -1 || max == -1
+      || param->sched_priority > max || param->sched_priority < min)
+    return EINVAL;
+
   /* Copy the new values.  */
   memcpy (&iattr->schedparam, param, sizeof (struct sched_param));
 
index 80780a0..d400f3d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
index 628c1c3..2afe5b3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 int
-pthread_barrier_destroy (pthread_barrier_t *barrier)
+pthread_barrier_destroy (
+     pthread_barrier_t *barrier)
 {
   struct pthread_barrier *ibarrier;
   int result = EBUSY;
 
   ibarrier = (struct pthread_barrier *) barrier;
 
-  lll_lock (ibarrier->lock);
+  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
     /* The barrier is not used anymore.  */
     result = 0;
   else
     /* Still used, return with an error.  */
-    lll_unlock (ibarrier->lock);
+    lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   return result;
 }
index 98a7ecc..f0396f9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <errno.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
+#include <bits/kernel-features.h>
+
+
+static const struct pthread_barrierattr default_attr =
+  {
+    .pshared = PTHREAD_PROCESS_PRIVATE
+  };
 
 
 int
@@ -33,17 +40,15 @@ pthread_barrier_init (
   if (__builtin_expect (count == 0, 0))
     return EINVAL;
 
-  if (attr != NULL)
-    {
-      struct pthread_barrierattr *iattr;
-
-      iattr = (struct pthread_barrierattr *) attr;
+  const struct pthread_barrierattr *iattr
+    = (attr != NULL
+       ? iattr = (struct pthread_barrierattr *) attr
+       : &default_attr);
 
-      if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
-         && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
-       /* Invalid attribute.  */
-       return EINVAL;
-    }
+  if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+      && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
+    /* Invalid attribute.  */
+    return EINVAL;
 
   ibarrier = (struct pthread_barrier *) barrier;
 
@@ -53,5 +58,14 @@ pthread_barrier_init (
   ibarrier->init_count = count;
   ibarrier->curr_event = 0;
 
+#ifdef __ASSUME_PRIVATE_FUTEX
+  ibarrier->private = (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+                      ? 0 : FUTEX_PRIVATE_FLAG);
+#else
+  ibarrier->private = (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+                      ? 0 : THREAD_GETMEM (THREAD_SELF,
+                                           header.private_futex));
+#endif
+
   return 0;
 }
index 3188523..4a958bc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,7 +26,8 @@
 
 
 int
-pthread_cancel (pthread_t th)
+pthread_cancel (
+     pthread_t th)
 {
   volatile struct pthread *pd = (volatile struct pthread *) th;
 
@@ -43,6 +44,7 @@ pthread_cancel (pthread_t th)
   int newval;
   do
     {
+    again:
       oldval = pd->cancelhandling;
       newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
 
@@ -58,7 +60,10 @@ pthread_cancel (pthread_t th)
       if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
          /* Mark the cancellation as "in progress".  */
-         atomic_bit_set (&pd->cancelhandling, CANCELING_BIT);
+         if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
+                                                   oldval | CANCELING_BITMASK,
+                                                   oldval))
+           goto again;
 
          /* The cancellation handler will take care of marking the
             thread as canceled.  */
index ebc6d78..b67b535 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 int
-__pthread_cond_destroy (pthread_cond_t *cond)
+__pthread_cond_destroy (
+     pthread_cond_t *cond)
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+               ? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
     {
       /* If there are still some waiters which have not been
         woken up, this is an application bug.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
       return EBUSY;
     }
 
@@ -42,15 +46,36 @@ __pthread_cond_destroy (pthread_cond_t *cond)
      broadcasted, but still are using the pthread_cond_t structure,
      pthread_cond_destroy needs to wait for them.  */
   unsigned int nwaiters = cond->__data.__nwaiters;
-  while (nwaiters >= (1 << COND_CLOCK_BITS))
+
+  if (nwaiters >= (1 << COND_NWAITERS_SHIFT))
     {
-      lll_mutex_unlock (cond->__data.__lock);
+      /* Wake everybody on the associated mutex in case there are
+         threads that have been requeued to it.
+         Without this, pthread_cond_destroy could block potentially
+         for a long time or forever, as it would depend on other
+         thread's using the mutex.
+         When all threads waiting on the mutex are woken up, pthread_cond_wait
+         only waits for threads to acquire and release the internal
+         condvar lock.  */
+      if (cond->__data.__mutex != NULL
+         && cond->__data.__mutex != (void *) ~0l)
+       {
+         pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+         lll_futex_wake (&mut->__data.__lock, INT_MAX,
+                         PTHREAD_MUTEX_PSHARED (mut));
+       }
+
+      do
+       {
+         lll_unlock (cond->__data.__lock, pshared);
 
-      lll_futex_wait (&cond->__data.__nwaiters, nwaiters);
+         lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared);
 
-      lll_mutex_lock (cond->__data.__lock);
+         lll_lock (cond->__data.__lock, pshared);
 
-      nwaiters = cond->__data.__nwaiters;
+         nwaiters = cond->__data.__nwaiters;
+       }
+      while (nwaiters >= (1 << COND_NWAITERS_SHIFT));
     }
 
   return 0;
index bf2ab31..dec6444 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,11 +28,12 @@ __pthread_cond_init (
 {
   struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;
 
-  cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER;
+  cond->__data.__lock = LLL_LOCK_INITIALIZER;
   cond->__data.__futex = 0;
   cond->__data.__nwaiters = (icond_attr != NULL
-                            && ((icond_attr->value & (COND_CLOCK_BITS << 1))
-                                >> 1));
+                            ? ((icond_attr->value >> 1)
+                               & ((1 << COND_NWAITERS_SHIFT) - 1))
+                            : CLOCK_REALTIME);
   cond->__data.__total_seq = 0;
   cond->__data.__wakeup_seq = 0;
   cond->__data.__woken_seq = 0;
index 056515b..3603f84 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -26,6 +26,6 @@ pthread_condattr_getclock (
      clockid_t *clock_id)
 {
   *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1)
-              & ((1 << COND_CLOCK_BITS) - 1));
+              & ((1 << COND_NWAITERS_SHIFT) - 1));
   return 0;
 }
index 32b60cf..87597c8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -62,11 +62,12 @@ pthread_condattr_setclock (
     return EINVAL;
 
   /* Make sure the value fits in the bits we reserved.  */
-  assert (clock_id < (1 << COND_CLOCK_BITS));
+  assert (clock_id < (1 << COND_NWAITERS_SHIFT));
 
   int *valuep = &((struct pthread_condattr *) attr)->value;
 
-  *valuep = (*valuep & ~(1 << (COND_CLOCK_BITS + 1)) & ~1) | (clock_id << 1);
+  *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1))
+            | (clock_id << 1));
 
   return 0;
 }
index 903e28d..a61cd71 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,6 +27,7 @@
 #include <atomic.h>
 #include <libc-internal.h>
 #include <resolv.h>
+#include <bits/kernel-features.h>
 
 
 /* Local function to start thread and handle cleanup.  */
@@ -37,10 +38,10 @@ static int start_thread (void *arg);
 int __pthread_debug;
 
 /* Globally enabled events.  */
-static td_thr_events_t __nptl_threads_events;
+static td_thr_events_t __nptl_threads_events __attribute_used__;
 
 /* Pointer to descriptor with the last event.  */
-static struct pthread *__nptl_last_event;
+static struct pthread *__nptl_last_event __attribute_used__;
 
 /* Number of threads running.  */
 unsigned int __nptl_nthreads = 1;
@@ -50,17 +51,18 @@ unsigned int __nptl_nthreads = 1;
 #include "allocatestack.c"
 
 /* Code to create the thread.  */
-#include "createthread.c"
+#include <createthread.c>
 
 
 struct pthread *
 internal_function
-__find_in_stack_list (struct pthread *pd)
+__find_in_stack_list (
+     struct pthread *pd)
 {
   list_t *entry;
   struct pthread *result = NULL;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   list_for_each (entry, &stack_used)
     {
@@ -87,7 +89,7 @@ __find_in_stack_list (struct pthread *pd)
          }
       }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
@@ -203,6 +205,15 @@ __free_tcb (struct pthread *pd)
           running thread is gone.  */
        abort ();
 
+      /* Free TPP data.  */
+      if (__builtin_expect (pd->tpp != NULL, 0))
+       {
+         struct priority_protection_data *tpp = pd->tpp;
+
+         pd->tpp = NULL;
+         free (tpp);
+       }
+
       /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
@@ -226,6 +237,32 @@ start_thread (void *arg)
   /* Initialize resolver state pointer.  */
   __resp = &pd->res;
 
+#ifdef __NR_set_robust_list
+# ifndef __ASSUME_SET_ROBUST_LIST
+  if (__set_robust_list_avail >= 0)
+# endif
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      /* This call should never fail because the initial call in init.c
+        succeeded.  */
+      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+                       sizeof (struct robust_list_head));
+    }
+#endif
+
+  /* If the parent was running cancellation handlers while creating
+     the thread the new thread inherited the signal mask.  Reset the
+     cancellation signal mask.  */
+  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      sigset_t mask;
+      __sigemptyset (&mask);
+      __sigaddset (&mask, SIGCANCEL);
+      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
+                              NULL, _NSIG / 8);
+    }
+
   /* This is where the try/finally block should be created.  For
      compilers without that support we do use setjmp.  */
   struct pthread_unwind_buf unwind_buf;
@@ -246,9 +283,9 @@ start_thread (void *arg)
          int oldtype = CANCEL_ASYNC ();
 
          /* Get the lock the parent locked to force synchronization.  */
-         lll_lock (pd->lock);
+         lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
-         lll_unlock (pd->lock);
+         lll_unlock (pd->lock, LLL_PRIVATE);
 
          CANCEL_RESET (oldtype);
        }
@@ -264,6 +301,9 @@ start_thread (void *arg)
   /* Run the destructor for the thread-local data.  */
   __nptl_deallocate_tsd ();
 
+  /* Clean up any state libc stored in thread-local variables.  */
+  __libc_thread_freeres ();
+
   /* If this is the last thread we terminate the process now.  We
      do not notify the debugger, it might just irritate it if there
      is no thread left.  */
@@ -304,10 +344,65 @@ start_thread (void *arg)
      the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
   atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
 
+#ifndef __ASSUME_SET_ROBUST_LIST
+  /* If this thread has any robust mutexes locked, handle them now.  */
+# if __WORDSIZE == 64
+  void *robust = pd->robust_head.list;
+# else
+  __pthread_slist_t *robust = pd->robust_list.__next;
+# endif
+  /* We let the kernel do the notification if it is able to do so.
+     If we have to do it here there for sure are no PI mutexes involved
+     since the kernel support for them is even more recent.  */
+  if (__set_robust_list_avail < 0
+      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
+    {
+      do
+       {
+         struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
+           ((char *) robust - offsetof (struct __pthread_mutex_s,
+                                        __list.__next));
+         robust = *((void **) robust);
+
+# ifdef __PTHREAD_MUTEX_HAVE_PREV
+         this->__list.__prev = NULL;
+# endif
+         this->__list.__next = NULL;
+
+         lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
+       }
+      while (robust != (void *) &pd->robust_head);
+    }
+#endif
+
+  /* Mark the memory of the stack as usable to the kernel.  We free
+     everything except for the space used for the TCB itself.  */
+  size_t pagesize_m1 = __getpagesize () - 1;
+#ifdef _STACK_GROWS_DOWN
+  char *sp = CURRENT_STACK_FRAME;
+  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
+#else
+# error "to do"
+#endif
+  assert (freesize < pd->stackblock_size);
+  if (freesize > PTHREAD_STACK_MIN)
+    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
+
   /* If the thread is detached free the TCB.  */
   if (IS_DETACHED (pd))
     /* Free the TCB.  */
     __free_tcb (pd);
+  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
+    {
+      /* Some other thread might call any of the setXid functions and expect
+        us to reply.  In this case wait until we did that.  */
+      do
+       lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
+      while (pd->cancelhandling & SETXID_BITMASK);
+
+      /* Reset the value so that the stack can be reused.  */
+      pd->setxid_futex = 0;
+    }
 
   /* We cannot call '_exit' here.  '_exit' will terminate the process.
 
@@ -348,7 +443,7 @@ __pthread_create_2_1 (
        accessing far-away memory.  */
     iattr = &default_attr;
 
-  struct pthread *pd = 0;
+  struct pthread *pd = NULL;
   int err = ALLOCATE_STACK (iattr, &pd);
   if (__builtin_expect (err != 0, 0))
     /* Something went wrong.  Maybe a parameter of the attributes is
@@ -398,6 +493,11 @@ __pthread_create_2_1 (
   THREAD_COPY_STACK_GUARD (pd);
 #endif
 
+  /* Copy the pointer guard value.  */
+#ifdef THREAD_COPY_POINTER_GUARD
+  THREAD_COPY_POINTER_GUARD (pd);
+#endif
+
   /* Determine scheduling parameters for the thread.  */
   if (attr != NULL
       && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
@@ -468,12 +568,14 @@ weak_alias(__pthread_create_2_1, pthread_create)
 /* If pthread_create is present, libgcc_eh.a and libsupc++.a expects some other POSIX thread
    functions to be present as well.  */
 PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
+PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
 PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)
 
 PTHREAD_STATIC_FN_REQUIRE (pthread_once)
 PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)
 
 PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
+PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
 PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
 PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)
 
index 00b9ba3..e0e6251 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -39,7 +39,7 @@ pthread_getattr_np (
   struct pthread_attr *iattr = (struct pthread_attr *) attr;
   int ret = 0;
 
-  lll_lock (thread->lock);
+  lll_lock (thread->lock, LLL_PRIVATE);
 
   /* The thread library is responsible for keeping the values in the
      thread desriptor up-to-date in case the user changes them.  */
@@ -79,51 +79,55 @@ pthread_getattr_np (
       if (fp == NULL)
        ret = errno;
       /* We need the limit of the stack in any case.  */
-      else if (getrlimit (RLIMIT_STACK, &rl) != 0)
-       ret = errno;
       else
        {
-         /* We need no locking.  */
-         __fsetlocking (fp, FSETLOCKING_BYCALLER);
+         if (getrlimit (RLIMIT_STACK, &rl) != 0)
+           ret = errno;
+         else
+           {
+             /* We need no locking.  */
+             __fsetlocking (fp, FSETLOCKING_BYCALLER);
 
-         /* Until we found an entry (which should always be the case)
-            mark the result as a failure.  */
-         ret = ENOENT;
+             /* Until we found an entry (which should always be the case)
+                mark the result as a failure.  */
+             ret = ENOENT;
 
-         char *line = NULL;
-         size_t linelen = 0;
-         uintptr_t last_to = 0;
+             char *line = NULL;
+             size_t linelen = 0;
+             uintptr_t last_to = 0;
 
-         while (! feof_unlocked (fp))
-           {
-             if (getdelim (&line, &linelen, '\n', fp) <= 0)
-               break;
-
-             uintptr_t from;
-             uintptr_t to;
-             if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
-               continue;
-             if (from <= (uintptr_t) __libc_stack_end
-                 && (uintptr_t) __libc_stack_end < to)
+             while (! feof_unlocked (fp))
                {
-                 /* Found the entry.  Now we have the info we need.  */
-                 iattr->stacksize = rl.rlim_cur;
-                 iattr->stackaddr = (void *) to;
-
-                 /* The limit might be too high.  */
-                 if ((size_t) iattr->stacksize
-                     > (size_t) iattr->stackaddr - last_to)
-                   iattr->stacksize = (size_t) iattr->stackaddr - last_to;
-
-                 /* We succeed and no need to look further.  */
-                 ret = 0;
-                 break;
+                 if (__getdelim (&line, &linelen, '\n', fp) <= 0)
+                   break;
+
+                 uintptr_t from;
+                 uintptr_t to;
+                 if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
+                   continue;
+                 if (from <= (uintptr_t) __libc_stack_end
+                     && (uintptr_t) __libc_stack_end < to)
+                   {
+                     /* Found the entry.  Now we have the info we need.  */
+                     iattr->stacksize = rl.rlim_cur;
+                     iattr->stackaddr = (void *) to;
+
+                     /* The limit might be too high.  */
+                     if ((size_t) iattr->stacksize
+                         > (size_t) iattr->stackaddr - last_to)
+                       iattr->stacksize = (size_t) iattr->stackaddr - last_to;
+
+                     /* We succeed and no need to look further.  */
+                     ret = 0;
+                     break;
+                   }
+                 last_to = to;
                }
-             last_to = to;
+
+             free (line);
            }
 
          fclose (fp);
-         free (line);
        }
     }
 
@@ -160,12 +164,16 @@ pthread_getattr_np (
        {
          free (cpuset);
          if (ret == ENOSYS)
-           /* There is no such functionality.  */
-           ret = 0;
+           {
+             /* There is no such functionality.  */
+             ret = 0;
+             iattr->cpuset = NULL;
+             iattr->cpusetsize = 0;
+           }
        }
     }
 
-  lll_unlock (thread->lock);
+  lll_unlock (thread->lock, LLL_PRIVATE);
 
   return ret;
 }
index 2dd33ac..bb5f0a9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -25,9 +25,9 @@
 
 int
 __pthread_getschedparam (
-        pthread_t threadid,
-        int *policy,
-        struct sched_param *param)
+     pthread_t threadid,
+     int *policy,
+     struct sched_param *param)
 {
   struct pthread *pd = (struct pthread *) threadid;
 
@@ -38,7 +38,7 @@ __pthread_getschedparam (
 
   int result = 0;
 
-  lll_lock (pd->lock);
+  lll_lock (pd->lock, LLL_PRIVATE);
 
   /* The library is responsible for maintaining the values at all
      times.  If the user uses a interface other than
@@ -68,7 +68,7 @@ __pthread_getschedparam (
       memcpy (param, &pd->schedparam, sizeof (struct sched_param));
     }
 
-  lll_unlock (pd->lock);
+  lll_unlock (pd->lock, LLL_PRIVATE);
 
   return result;
 }
index 977dbcf..ce6cf6f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <errno.h>
 #include <stdlib.h>
 
-#include "atomic.h"
+#include <atomic.h>
 #include "pthreadP.h"
 
 
 static void
 cleanup (void *arg)
 {
-  *(void **) arg = NULL;
+  /* If we already changed the waiter ID, reset it.  The call cannot
+     fail for any reason but the thread not having done that yet so
+     there is no reason for a loop.  */
+  (void) atomic_compare_and_exchange_bool_acq ((struct pthread **) arg, NULL,
+                                              THREAD_SELF);
 }
 
 
 int
-pthread_join (pthread_t threadid, void **thread_return)
+pthread_join (
+     pthread_t threadid,
+     void **thread_return)
 {
-  struct pthread *self;
   struct pthread *pd = (struct pthread *) threadid;
 
   /* Make sure the descriptor is valid.  */
@@ -47,12 +52,23 @@ pthread_join (pthread_t threadid, void **thread_return)
     /* We cannot wait for the thread.  */
     return EINVAL;
 
-  self = THREAD_SELF;
-  if (pd == self
-      || (self->joinid == pd
-         && (pd->cancelhandling
-             & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
-                | TERMINATED_BITMASK)) == 0))
+  struct pthread *self = THREAD_SELF;
+  int result = 0;
+
+  /* During the wait we change to asynchronous cancellation.  If we
+     are canceled the thread we are waiting for must be marked as
+     un-wait-ed for again.  */
+  pthread_cleanup_push (cleanup, &pd->joinid);
+
+  /* Switch to asynchronous cancellation.  */
+  int oldtype = CANCEL_ASYNC ();
+
+  if ((pd == self
+       || (self->joinid == pd
+          && (pd->cancelhandling
+              & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
+                 | TERMINATED_BITMASK)) == 0))
+      && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
     /* This is a deadlock situation.  The threads are waiting for each
        other to finish.  Note that this is a "may" error.  To be 100%
        sure we catch this error we would have to lock the data
@@ -60,28 +76,17 @@ pthread_join (pthread_t threadid, void **thread_return)
        two threads are really caught in this situation they will
        deadlock.  It is the programmer's problem to figure this
        out.  */
-    return EDEADLK;
-
+    result = EDEADLK;
   /* Wait for the thread to finish.  If it is already locked something
      is wrong.  There can only be one waiter.  */
-  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
-                                                             self,
-                                                             NULL), 0))
+  else if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
+                                                                  self,
+                                                                  NULL), 0))
     /* There is already somebody waiting for the thread.  */
-    return EINVAL;
-
-
-  /* During the wait we change to asynchronous cancellation.  If we
-     are cancelled the thread we are waiting for must be marked as
-     un-wait-ed for again.  */
-  pthread_cleanup_push (cleanup, &pd->joinid);
-
-  /* Switch to asynchronous cancellation.  */
-  int oldtype = CANCEL_ASYNC ();
-
-
-  /* Wait for the child.  */
-  lll_wait_tid (pd->tid);
+    result = EINVAL;
+  else
+    /* Wait for the child.  */
+    lll_wait_tid (pd->tid);
 
 
   /* Restore cancellation mode.  */
@@ -91,16 +96,19 @@ pthread_join (pthread_t threadid, void **thread_return)
   pthread_cleanup_pop (0);
 
 
-  /* We mark the thread as terminated and as joined.  */
-  pd->tid = -1;
+  if (__builtin_expect (result == 0, 1))
+    {
+      /* We mark the thread as terminated and as joined.  */
+      pd->tid = -1;
 
-  /* Store the return value if the caller is interested.  */
-  if (thread_return != NULL)
-    *thread_return = pd->result;
+      /* Store the return value if the caller is interested.  */
+      if (thread_return != NULL)
+       *thread_return = pd->result;
 
 
-  /* Free the TCB.  */
-  __free_tcb (pd);
+      /* Free the TCB.  */
+      __free_tcb (pd);
+    }
 
-  return 0;
+  return result;
 }
index 88d160e..3744c08 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <errno.h>
 #include "pthreadP.h"
+#include <atomic.h>
 
 
-/* Internal mutex for __pthread_keys table handling.  */
-lll_lock_t __pthread_keys_lock = LLL_LOCK_INITIALIZER;
-
 int
 __pthread_key_create (
      pthread_key_t *key,
      void (*destr) (void *))
 {
-  int result = EAGAIN;
-  size_t cnt;
-
-  lll_lock (__pthread_keys_lock);
-
   /* Find a slot in __pthread_kyes which is unused.  */
-  for (cnt = 0; cnt < PTHREAD_KEYS_MAX; ++cnt)
-    if (KEY_UNUSED (__pthread_keys[cnt].seq)
-       && KEY_USABLE (__pthread_keys[cnt].seq))
-      {
-       /* We found an unused slot.  */
-       ++__pthread_keys[cnt].seq;
-
-       /* Remember the destructor.  */
-       __pthread_keys[cnt].destr = destr;
-
-       /* Return the key to the caller.  */
-       *key = cnt;
-
-       /* The call succeeded.  */
-       result = 0;
-
-       /* We found a key and can stop now.  */
-       break;
-      }
-
-  lll_unlock (__pthread_keys_lock);
-
-  return result;
+  for (size_t cnt = 0; cnt < PTHREAD_KEYS_MAX; ++cnt)
+    {
+      uintptr_t seq = __pthread_keys[cnt].seq;
+
+      if (KEY_UNUSED (seq) && KEY_USABLE (seq)
+         /* We found an unused slot.  Try to allocate it.  */
+         && ! atomic_compare_and_exchange_bool_acq (&__pthread_keys[cnt].seq,
+                                                    seq + 1, seq))
+       {
+         /* Remember the destructor.  */
+         __pthread_keys[cnt].destr = destr;
+
+         /* Return the key to the caller.  */
+         *key = cnt;
+
+         /* The call succeeded.  */
+         return 0;
+       }
+    }
+
+  return EAGAIN;
 }
 strong_alias (__pthread_key_create, pthread_key_create)
 strong_alias (__pthread_key_create, __pthread_key_create_internal)
diff --git a/libpthread/nptl/pthread_mutex_consistent.c b/libpthread/nptl/pthread_mutex_consistent.c
new file mode 100644 (file)
index 0000000..1e8f074
--- /dev/null
@@ -0,0 +1,37 @@
+/* Copyright (C) 2005, 2006, 2010 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <pthreadP.h>
+
+
+int
+pthread_mutex_consistent (
+     pthread_mutex_t *mutex)
+{
+  /* Test whether this is a robust mutex with a dead owner.  */
+  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+      || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
+    return EINVAL;
+
+  mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);
+
+  return 0;
+}
+weak_alias (pthread_mutex_consistent, pthread_mutex_consistent_np)
index 05bd96a..f487d61 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 int
-__pthread_mutex_destroy (pthread_mutex_t *mutex)
+__pthread_mutex_destroy (
+     pthread_mutex_t *mutex)
 {
-  if (mutex->__data.__nusers != 0)
+  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+      && mutex->__data.__nusers != 0)
     return EBUSY;
 
+  /* Set to an invalid value.  */
+  mutex->__data.__kind = -1;
+
   return 0;
 }
 strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
+INTDEF(__pthread_mutex_destroy)
diff --git a/libpthread/nptl/pthread_mutex_getprioceiling.c b/libpthread/nptl/pthread_mutex_getprioceiling.c
new file mode 100644 (file)
index 0000000..1ce5eae
--- /dev/null
@@ -0,0 +1,38 @@
+/* Get current priority ceiling of pthread_mutex_t.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <pthreadP.h>
+
+
+int
+pthread_mutex_getprioceiling (mutex, prioceiling)
+     const pthread_mutex_t *mutex;
+     int *prioceiling;
+{
+  if (__builtin_expect ((mutex->__data.__kind
+                        & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0, 0))
+    return EINVAL;
+
+  *prioceiling = (mutex->__data.__lock & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+                >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+  return 0;
+}
index 2349497..dd6e6d6 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <assert.h>
+#include <errno.h>
 #include <string.h>
+#include <bits/kernel-features.h>
 #include "pthreadP.h"
 
-
 static const struct pthread_mutexattr default_attr =
   {
     /* Default is a normal mutex, not shared between processes.  */
@@ -29,6 +31,11 @@ static const struct pthread_mutexattr default_attr =
   };
 
 
+#ifndef __ASSUME_FUTEX_LOCK_PI
+static int tpi_supported;
+#endif
+
+
 int
 __pthread_mutex_init (
      pthread_mutex_t *mutex,
@@ -40,18 +47,95 @@ __pthread_mutex_init (
 
   imutexattr = (const struct pthread_mutexattr *) mutexattr ?: &default_attr;
 
+  /* Sanity checks.  */
+  switch (__builtin_expect (imutexattr->mutexkind
+                           & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
+                           PTHREAD_PRIO_NONE
+                           << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
+    {
+    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+      break;
+
+    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+#ifndef __ASSUME_FUTEX_LOCK_PI
+      if (__builtin_expect (tpi_supported == 0, 0))
+       {
+         int lock = 0;
+         INTERNAL_SYSCALL_DECL (err);
+         int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI,
+                                     0, 0);
+         assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
+         tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
+       }
+      if (__builtin_expect (tpi_supported < 0, 0))
+       return ENOTSUP;
+#endif
+      break;
+
+    default:
+      /* XXX: For now we don't support robust priority protected mutexes.  */
+      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
+       return ENOTSUP;
+      break;
+    }
+
   /* Clear the whole variable.  */
   memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);
 
   /* Copy the values from the attribute.  */
-  mutex->__data.__kind = imutexattr->mutexkind & ~0x80000000;
+  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
+
+  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
+    {
+#ifndef __ASSUME_SET_ROBUST_LIST
+      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
+         && __set_robust_list_avail < 0)
+       return ENOTSUP;
+#endif
+
+      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+    }
+
+  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
+    {
+    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
+      break;
+
+    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
+      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
+
+      int ceiling = (imutexattr->mutexkind
+                    & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+                   >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
+      if (! ceiling)
+       {
+         if (__sched_fifo_min_prio == -1)
+           __init_sched_fifo_prio ();
+         if (ceiling < __sched_fifo_min_prio)
+           ceiling = __sched_fifo_min_prio;
+       }
+      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+      break;
+
+    default:
+      break;
+    }
+
+  /* The kernel when waking robust mutexes on exit never uses
+     FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
+  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
+                               | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
+    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;
 
   /* Default values: mutex not used yet.  */
   // mutex->__count = 0;       already done by memset
   // mutex->__owner = 0;       already done by memset
   // mutex->__nusers = 0;      already done by memset
   // mutex->__spins = 0;       already done by memset
+  // mutex->__next = NULL;     already done by memset
 
   return 0;
 }
 strong_alias (__pthread_mutex_init, pthread_mutex_init)
+INTDEF(__pthread_mutex_init)
index faa5391..47e4d41 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <assert.h>
 #include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <not-cancel.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
 
 
 #ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
+# define LLL_MUTEX_LOCK(mutex) \
+  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_trylock ((mutex)->__data.__lock)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+  lll_robust_lock ((mutex)->__data.__lock, id, \
+                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
 #endif
 
 
+static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
+     __attribute_noinline__;
+
+
 int
-__pthread_mutex_lock (pthread_mutex_t *mutex)
+__pthread_mutex_lock (
+     pthread_mutex_t *mutex)
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
+  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
+  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
+    return __pthread_mutex_lock_full (mutex);
+
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
-  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
+  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
+      == PTHREAD_MUTEX_TIMED_NP)
+    {
+    simple:
+      /* Normal mutex.  */
+      LLL_MUTEX_LOCK (mutex);
+      assert (mutex->__data.__owner == 0);
+    }
+  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
     {
       /* Recursive mutex.  */
-    case PTHREAD_MUTEX_RECURSIVE_NP:
+
       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
        {
@@ -54,32 +79,17 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
        }
 
       /* We have to get the mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+      LLL_MUTEX_LOCK (mutex);
 
+      assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
-      break;
-
-      /* Error checking mutex.  */
-    case PTHREAD_MUTEX_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (mutex->__data.__owner == id)
-       return EDEADLK;
-
-      /* FALLTHROUGH */
-
-    default:
-      /* Correct code cannot set any other type.  */
-    case PTHREAD_MUTEX_TIMED_NP:
-    simple:
-      /* Normal mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
-      break;
-
-    case PTHREAD_MUTEX_ADAPTIVE_NP:
+    }
+  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+    {
       if (! __is_smp)
        goto simple;
 
-      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
+      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -88,7 +98,7 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
            {
              if (cnt++ >= max_cnt)
                {
-                 LLL_MUTEX_LOCK (mutex->__data.__lock);
+                 LLL_MUTEX_LOCK (mutex);
                  break;
                }
 
@@ -96,15 +106,362 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
              BUSY_WAIT_NOP;
 #endif
            }
-         while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);
+         while (LLL_MUTEX_TRYLOCK (mutex) != 0);
 
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
+      assert (mutex->__data.__owner == 0);
+    }
+  else
+    {
+      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
+      /* Check whether we already hold the mutex.  */
+      if (__builtin_expect (mutex->__data.__owner == id, 0))
+       return EDEADLK;
+      goto simple;
+    }
+
+  /* Record the ownership.  */
+  mutex->__data.__owner = id;
+#ifndef NO_INCR
+  ++mutex->__data.__nusers;
+#endif
+
+  return 0;
+}
+
+static int
+__pthread_mutex_lock_full (pthread_mutex_t *mutex)
+{
+  int oldval;
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+  switch (PTHREAD_MUTEX_TYPE (mutex))
+    {
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                    &mutex->__data.__list.__next);
+
+      oldval = mutex->__data.__lock;
+      do
+       {
+       again:
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval = id;
+#ifdef NO_INCR
+             newval |= FUTEX_WAITERS;
+#else
+             newval |= (oldval & FUTEX_WAITERS);
+#endif
+
+             newval
+               = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                      newval, oldval);
+
+             if (newval != oldval)
+               {
+                 oldval = newval;
+                 goto again;
+               }
+
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  If we are not supposed
+                to increment __nusers we actually have to decrement
+                it here.  */
+#ifdef NO_INCR
+             --mutex->__data.__nusers;
+#endif
+
+             return EOWNERDEAD;
+           }
+
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+           {
+             int kind = PTHREAD_MUTEX_TYPE (mutex);
+             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+                 return EDEADLK;
+               }
+
+             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
+
+                 ++mutex->__data.__count;
+
+                 return 0;
+               }
+           }
+
+         oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
+
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_unlock (mutex->__data.__lock,
+                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+             return ENOTRECOVERABLE;
+           }
+       }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
+
+      mutex->__data.__count = 1;
+      ENQUEUE_MUTEX (mutex);
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+      break;
+
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+       if (robust)
+         /* Note: robust PI futexes are signaled by setting bit 0.  */
+         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                  | 1));
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+               return EDEADLK;
+             }
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
+       int newval = id;
+#ifdef NO_INCR
+       newval |= FUTEX_WAITERS;
+#endif
+       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                     newval, 0);
+
+       if (oldval != 0)
+         {
+           /* The mutex is locked.  The kernel will now take care of
+              everything.  */
+           int private = (robust
+                          ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                          : PTHREAD_MUTEX_PSHARED (mutex));
+           INTERNAL_SYSCALL_DECL (__err);
+           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                                     __lll_private_flag (FUTEX_LOCK_PI,
+                                                         private), 1, 0);
+
+           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
+               && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+                   || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
+             {
+               assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+                       || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+                           && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+               /* ESRCH can happen only for non-robust PI mutexes where
+                  the owner of the lock died.  */
+               assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
+
+               /* Delay the thread indefinitely.  */
+               while (1)
+                 pause_not_cancel ();
+             }
+
+           oldval = mutex->__data.__lock;
+
+           assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+         }
+
+       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+         {
+           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+           /* We got the mutex.  */
+           mutex->__data.__count = 1;
+           /* But it is inconsistent unless marked otherwise.  */
+           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+           /* Note that we deliberately exit here.  If we fall
+              through to the end of the function __nusers would be
+              incremented which is not correct because the old owner
+              has to be discounted.  If we are not supposed to
+              increment __nusers we actually have to decrement it here.  */
+#ifdef NO_INCR
+           --mutex->__data.__nusers;
+#endif
+
+           return EOWNERDEAD;
+         }
+
+       if (robust
+           && __builtin_expect (mutex->__data.__owner
+                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+         {
+           /* This mutex is now not recoverable.  */
+           mutex->__data.__count = 0;
+
+           INTERNAL_SYSCALL_DECL (__err);
+           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                             __lll_private_flag (FUTEX_UNLOCK_PI,
+                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
+                             0, 0);
+
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+           return ENOTRECOVERABLE;
+         }
+
+       mutex->__data.__count = 1;
+       if (robust)
+         {
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+         }
+      }
       break;
+
+    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PP_NORMAL_NP:
+    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (mutex->__data.__owner == id)
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             return EDEADLK;
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
+       int oldprio = -1, ceilval;
+       do
+         {
+           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+                         >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+           if (__pthread_current_priority () > ceiling)
+             {
+               if (oldprio != -1)
+                 __pthread_tpp_change_priority (oldprio, -1);
+               return EINVAL;
+             }
+
+           int retval = __pthread_tpp_change_priority (oldprio, ceiling);
+           if (retval)
+             return retval;
+
+           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+           oldprio = ceiling;
+
+           oldval
+             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+#ifdef NO_INCR
+                                                    ceilval | 2,
+#else
+                                                    ceilval | 1,
+#endif
+                                                    ceilval);
+
+           if (oldval == ceilval)
+             break;
+
+           do
+             {
+               oldval
+                 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                        ceilval | 2,
+                                                        ceilval | 1);
+
+               if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+                 break;
+
+               if (oldval != ceilval)
+                 lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
+                                 PTHREAD_MUTEX_PSHARED (mutex));
+             }
+           while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                       ceilval | 2, ceilval)
+                  != ceilval);
+         }
+       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+       assert (mutex->__data.__owner == 0);
+       mutex->__data.__count = 1;
+      }
+      break;
+
+    default:
+      /* Correct code cannot set any other type.  */
+      return EINVAL;
     }
 
   /* Record the ownership.  */
-  assert (mutex->__data.__owner == 0);
   mutex->__data.__owner = id;
 #ifndef NO_INCR
   ++mutex->__data.__nusers;
@@ -116,3 +473,22 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
 strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
 strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
 #endif
+
+
+#ifdef NO_INCR
+void
+__pthread_mutex_cond_lock_adjust (
+     pthread_mutex_t *mutex)
+{
+  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
+  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
+  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
+
+  /* Record the ownership.  */
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+  mutex->__data.__owner = id;
+
+  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
+    ++mutex->__data.__count;
+}
+#endif
diff --git a/libpthread/nptl/pthread_mutex_setprioceiling.c b/libpthread/nptl/pthread_mutex_setprioceiling.c
new file mode 100644 (file)
index 0000000..836c9a3
--- /dev/null
@@ -0,0 +1,119 @@
+/* Set current priority ceiling of pthread_mutex_t.
+   Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <stdbool.h>
+#include <errno.h>
+#include <pthreadP.h>
+
+
+int
+pthread_mutex_setprioceiling (mutex, prioceiling, old_ceiling)
+     pthread_mutex_t *mutex;
+     int prioceiling;
+     int *old_ceiling;
+{
+  /* The low bits of __kind aren't ever changed after pthread_mutex_init,
+     so we don't need a lock yet.  */
+  if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
+    return EINVAL;
+
+  if (__sched_fifo_min_prio == -1)
+    __init_sched_fifo_prio ();
+
+  if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
+      || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
+      || __builtin_expect ((prioceiling
+                           & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
+                              >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
+                          != prioceiling, 0))
+    return EINVAL;
+
+  /* Check whether we already hold the mutex.  */
+  bool locked = false;
+  int kind = PTHREAD_MUTEX_TYPE (mutex);
+  if (mutex->__data.__owner == THREAD_GETMEM (THREAD_SELF, tid))
+    {
+      if (kind == PTHREAD_MUTEX_PP_ERRORCHECK_NP)
+       return EDEADLK;
+
+      if (kind == PTHREAD_MUTEX_PP_RECURSIVE_NP)
+       locked = true;
+    }
+
+  int oldval = mutex->__data.__lock;
+  if (! locked)
+    do
+      {
+       /* Need to lock the mutex, but without obeying the priority
+          protect protocol.  */
+       int ceilval = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK);
+
+       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                     ceilval | 1, ceilval);
+       if (oldval == ceilval)
+         break;
+
+       do
+         {
+           oldval
+             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                    ceilval | 2,
+                                                    ceilval | 1);
+
+           if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+             break;
+
+           if (oldval != ceilval)
+             lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
+                             PTHREAD_MUTEX_PSHARED (mutex));
+         }
+       while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                   ceilval | 2, ceilval)
+              != ceilval);
+
+       if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+         continue;
+      }
+    while (0);
+
+  int oldprio = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+               >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+  if (locked)
+    {
+      int ret = __pthread_tpp_change_priority (oldprio, prioceiling);
+      if (ret)
+       return ret;
+    }
+
+  if (old_ceiling != NULL)
+    *old_ceiling = oldprio;
+
+  int newlock = 0;
+  if (locked)
+    newlock = (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK);
+  mutex->__data.__lock = newlock
+                        | (prioceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT);
+  atomic_full_barrier ();
+
+  lll_futex_wake (&mutex->__data.__lock, INT_MAX,
+                 PTHREAD_MUTEX_PSHARED (mutex));
+
+  return 0;
+}
index 6f3df6f..a4ee25d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <assert.h>
 #include <errno.h>
+#include <time.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
+#include <not-cancel.h>
 
 
 int
@@ -27,13 +30,15 @@ pthread_mutex_timedlock (
      pthread_mutex_t *mutex,
      const struct timespec *abstime)
 {
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
   int result = 0;
 
   /* We must not check ABSTIME here.  If the thread does not block
      abstime must not be checked for a valid value.  */
 
-  switch (mutex->__data.__kind)
+  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
+                           PTHREAD_MUTEX_TIMED_NP))
     {
       /* Recursive mutex.  */
     case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -49,40 +54,38 @@ pthread_mutex_timedlock (
 
          goto out;
        }
-      else
-       {
-         /* We have to get the mutex.  */
-         result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
 
-         if (result != 0)
-           goto out;
+      /* We have to get the mutex.  */
+      result = lll_timedlock (mutex->__data.__lock, abstime,
+                             PTHREAD_MUTEX_PSHARED (mutex));
 
-         /* Only locked once so far.  */
-         mutex->__data.__count = 1;
-       }
+      if (result != 0)
+       goto out;
+
+      /* Only locked once so far.  */
+      mutex->__data.__count = 1;
       break;
 
       /* Error checking mutex.  */
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Check whether we already hold the mutex.  */
-      if (mutex->__data.__owner == id)
+      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
 
       /* FALLTHROUGH */
 
-    default:
-      /* Correct code cannot set any other type.  */
     case PTHREAD_MUTEX_TIMED_NP:
     simple:
       /* Normal mutex.  */
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+      result = lll_timedlock (mutex->__data.__lock, abstime,
+                             PTHREAD_MUTEX_PSHARED (mutex));
       break;
 
     case PTHREAD_MUTEX_ADAPTIVE_NP:
       if (! __is_smp)
        goto simple;
 
-      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -91,7 +94,8 @@ pthread_mutex_timedlock (
            {
              if (cnt++ >= max_cnt)
                {
-                 result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+                 result = lll_timedlock (mutex->__data.__lock, abstime,
+                                         PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
 
@@ -99,11 +103,373 @@ pthread_mutex_timedlock (
              BUSY_WAIT_NOP;
 #endif
            }
-         while (lll_mutex_trylock (mutex->__data.__lock) != 0);
+         while (lll_trylock (mutex->__data.__lock) != 0);
 
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
       break;
+
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                    &mutex->__data.__list.__next);
+
+      oldval = mutex->__data.__lock;
+      do
+       {
+       again:
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval = id | (oldval & FUTEX_WAITERS);
+
+             newval
+               = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                      newval, oldval);
+             if (newval != oldval)
+               {
+                 oldval = newval;
+                 goto again;
+               }
+
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  */
+             return EOWNERDEAD;
+           }
+
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+           {
+             int kind = PTHREAD_MUTEX_TYPE (mutex);
+             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+                 return EDEADLK;
+               }
+
+             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
+
+                 ++mutex->__data.__count;
+
+                 return 0;
+               }
+           }
+
+         result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
+                                        PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_unlock (mutex->__data.__lock,
+                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+             return ENOTRECOVERABLE;
+           }
+
+         if (result == ETIMEDOUT || result == EINVAL)
+           goto out;
+
+         oldval = result;
+       }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
+
+      mutex->__data.__count = 1;
+      ENQUEUE_MUTEX (mutex);
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+      break;
+
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+       if (robust)
+         /* Note: robust PI futexes are signaled by setting bit 0.  */
+         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                  | 1));
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+               return EDEADLK;
+             }
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
+       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                     id, 0);
+
+       if (oldval != 0)
+         {
+           /* The mutex is locked.  The kernel will now take care of
+              everything.  The timeout value must be a relative value.
+              Convert it.  */
+           int private = (robust
+                          ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                          : PTHREAD_MUTEX_PSHARED (mutex));
+           INTERNAL_SYSCALL_DECL (__err);
+
+           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                                     __lll_private_flag (FUTEX_LOCK_PI,
+                                                         private), 1,
+                                     abstime);
+           if (INTERNAL_SYSCALL_ERROR_P (e, __err))
+             {
+               if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
+                 return ETIMEDOUT;
+
+               if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+                   || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
+                 {
+                   assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+                           || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+                               && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+                   /* ESRCH can happen only for non-robust PI mutexes where
+                      the owner of the lock died.  */
+                   assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
+                           || !robust);
+
+                   /* Delay the thread until the timeout is reached.
+                      Then return ETIMEDOUT.  */
+                   struct timespec reltime;
+                   struct timespec now;
+
+                   INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
+                                     &now);
+                   reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+                   reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
+                   if (reltime.tv_nsec < 0)
+                     {
+                       reltime.tv_nsec += 1000000000;
+                       --reltime.tv_sec;
+                     }
+                   if (reltime.tv_sec >= 0)
+                     while (nanosleep_not_cancel (&reltime, &reltime) != 0)
+                       continue;
+
+                   return ETIMEDOUT;
+                 }
+
+               return INTERNAL_SYSCALL_ERRNO (e, __err);
+             }
+
+           oldval = mutex->__data.__lock;
+
+           assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+         }
+
+       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+         {
+           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+           /* We got the mutex.  */
+           mutex->__data.__count = 1;
+           /* But it is inconsistent unless marked otherwise.  */
+           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+           /* Note that we deliberately exit here.  If we fall
+              through to the end of the function __nusers would be
+              incremented which is not correct because the old owner
+              has to be discounted.  */
+           return EOWNERDEAD;
+         }
+
+       if (robust
+           && __builtin_expect (mutex->__data.__owner
+                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+         {
+           /* This mutex is now not recoverable.  */
+           mutex->__data.__count = 0;
+
+           INTERNAL_SYSCALL_DECL (__err);
+           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                             __lll_private_flag (FUTEX_UNLOCK_PI,
+                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
+                             0, 0);
+
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+           return ENOTRECOVERABLE;
+         }
+
+       mutex->__data.__count = 1;
+       if (robust)
+         {
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+         }
+       }
+      break;
+
+    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PP_NORMAL_NP:
+    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (mutex->__data.__owner == id)
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             return EDEADLK;
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
+       int oldprio = -1, ceilval;
+       do
+         {
+           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+                         >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+           if (__pthread_current_priority () > ceiling)
+             {
+               result = EINVAL;
+             failpp:
+               if (oldprio != -1)
+                 __pthread_tpp_change_priority (oldprio, -1);
+               return result;
+             }
+
+           result = __pthread_tpp_change_priority (oldprio, ceiling);
+           if (result)
+             return result;
+
+           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+           oldprio = ceiling;
+
+           oldval
+             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                    ceilval | 1, ceilval);
+
+           if (oldval == ceilval)
+             break;
+
+           do
+             {
+               oldval
+                 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                        ceilval | 2,
+                                                        ceilval | 1);
+
+               if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+                 break;
+
+               if (oldval != ceilval)
+                 {
+                   /* Reject invalid timeouts.  */
+                   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+                     {
+                       result = EINVAL;
+                       goto failpp;
+                     }
+
+                   struct timeval tv;
+                   struct timespec rt;
+
+                   /* Get the current time.  */
+                   (void) gettimeofday (&tv, NULL);
+
+                   /* Compute relative timeout.  */
+                   rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+                   rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+                   if (rt.tv_nsec < 0)
+                     {
+                       rt.tv_nsec += 1000000000;
+                       --rt.tv_sec;
+                     }
+
+                   /* Already timed out?  */
+                   if (rt.tv_sec < 0)
+                     {
+                       result = ETIMEDOUT;
+                       goto failpp;
+                     }
+
+                   lll_futex_timed_wait (&mutex->__data.__lock,
+                                         ceilval | 2, &rt,
+                                         PTHREAD_MUTEX_PSHARED (mutex));
+                 }
+             }
+           while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                       ceilval | 2, ceilval)
+                  != ceilval);
+         }
+       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+       assert (mutex->__data.__owner == 0);
+       mutex->__data.__count = 1;
+      }
+      break;
+
+    default:
+      /* Correct code cannot set any other type.  */
+      return EINVAL;
     }
 
   if (result == 0)
index 2eb88db..5555afc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <assert.h>
 #include <errno.h>
+#include <stdlib.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
 
 
 int
-__pthread_mutex_trylock (pthread_mutex_t *mutex)
+__pthread_mutex_trylock (
+     pthread_mutex_t *mutex)
 {
-  pid_t id;
+  int oldval;
+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
-  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
+  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
+                           PTHREAD_MUTEX_TIMED_NP))
     {
       /* Recursive mutex.  */
     case PTHREAD_MUTEX_RECURSIVE_NP:
-      id = THREAD_GETMEM (THREAD_SELF, tid);
       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
        {
@@ -44,7 +48,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
          return 0;
        }
 
-      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
+      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
@@ -55,20 +59,322 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
       break;
 
     case PTHREAD_MUTEX_ERRORCHECK_NP:
-      /* Error checking mutex.  We do not check for deadlocks.  */
-    default:
-      /* Correct code cannot set any other type.  */
     case PTHREAD_MUTEX_TIMED_NP:
     case PTHREAD_MUTEX_ADAPTIVE_NP:
       /* Normal mutex.  */
-      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
+      if (lll_trylock (mutex->__data.__lock) != 0)
+       break;
+
+      /* Record the ownership.  */
+      mutex->__data.__owner = id;
+      ++mutex->__data.__nusers;
+
+      return 0;
+
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                    &mutex->__data.__list.__next);
+
+      oldval = mutex->__data.__lock;
+      do
        {
-         /* Record the ownership.  */
-         mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);
-         ++mutex->__data.__nusers;
+       again:
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval = id | (oldval & FUTEX_WAITERS);
 
-         return 0;
+             newval
+               = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                      newval, oldval);
+
+             if (newval != oldval)
+               {
+                 oldval = newval;
+                 goto again;
+               }
+
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  */
+             return EOWNERDEAD;
+           }
+
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+           {
+             int kind = PTHREAD_MUTEX_TYPE (mutex);
+             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+                 return EDEADLK;
+               }
+
+             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
+
+                 ++mutex->__data.__count;
+
+                 return 0;
+               }
+           }
+
+         oldval = lll_robust_trylock (mutex->__data.__lock, id);
+         if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
+           {
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+             return EBUSY;
+           }
+
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             if (oldval == id)
+               lll_unlock (mutex->__data.__lock,
+                           PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+             return ENOTRECOVERABLE;
+           }
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
+
+      ENQUEUE_MUTEX (mutex);
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+      mutex->__data.__owner = id;
+      ++mutex->__data.__nusers;
+      mutex->__data.__count = 1;
+
+      return 0;
+
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+       if (robust)
+         /* Note: robust PI futexes are signaled by setting bit 0.  */
+         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                  | 1));
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+               return EDEADLK;
+             }
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
+       oldval
+         = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                id, 0);
+
+       if (oldval != 0)
+         {
+           if ((oldval & FUTEX_OWNER_DIED) == 0)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+               return EBUSY;
+             }
+
+           assert (robust);
+
+           /* The mutex owner died.  The kernel will now take care of
+              everything.  */
+           int private = (robust
+                          ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                          : PTHREAD_MUTEX_PSHARED (mutex));
+           INTERNAL_SYSCALL_DECL (__err);
+           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                                     __lll_private_flag (FUTEX_TRYLOCK_PI,
+                                                         private), 0, 0);
+
+           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
+               && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+               return EBUSY;
+             }
+
+           oldval = mutex->__data.__lock;
+         }
+
+       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+         {
+           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+           /* We got the mutex.  */
+           mutex->__data.__count = 1;
+           /* But it is inconsistent unless marked otherwise.  */
+           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+           ENQUEUE_MUTEX (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+           /* Note that we deliberately exit here.  If we fall
+              through to the end of the function __nusers would be
+              incremented which is not correct because the old owner
+              has to be discounted.  */
+           return EOWNERDEAD;
+         }
+
+       if (robust
+           && __builtin_expect (mutex->__data.__owner
+                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+         {
+           /* This mutex is now not recoverable.  */
+           mutex->__data.__count = 0;
+
+           INTERNAL_SYSCALL_DECL (__err);
+           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                             __lll_private_flag (FUTEX_UNLOCK_PI,
+                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
+                             0, 0);
+
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+           return ENOTRECOVERABLE;
+         }
+
+       if (robust)
+         {
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+         }
+
+       mutex->__data.__owner = id;
+       ++mutex->__data.__nusers;
+       mutex->__data.__count = 1;
+
+       return 0;
+      }
+
+    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PP_NORMAL_NP:
+    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (mutex->__data.__owner == id)
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             return EDEADLK;
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
+       int oldprio = -1, ceilval;
+       do
+         {
+           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+                         >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+           if (__pthread_current_priority () > ceiling)
+             {
+               if (oldprio != -1)
+                 __pthread_tpp_change_priority (oldprio, -1);
+               return EINVAL;
+             }
+
+           int retval = __pthread_tpp_change_priority (oldprio, ceiling);
+           if (retval)
+             return retval;
+
+           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+           oldprio = ceiling;
+
+           oldval
+             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                    ceilval | 1, ceilval);
+
+           if (oldval == ceilval)
+             break;
+         }
+       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+       if (oldval != ceilval)
+         {
+           __pthread_tpp_change_priority (oldprio, -1);
+           break;
+         }
+
+       assert (mutex->__data.__owner == 0);
+       /* Record the ownership.  */
+       mutex->__data.__owner = id;
+       ++mutex->__data.__nusers;
+       mutex->__data.__count = 1;
+
+       return 0;
+      }
+      break;
+
+    default:
+      /* Correct code cannot set any other type.  */
+      return EINVAL;
     }
 
   return EBUSY;
index 0cb6df3..f164cde 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005-2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <assert.h>
 #include <errno.h>
+#include <stdlib.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
 
+static int
+internal_function
+__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
+     __attribute_noinline__;
 
 int
 internal_function attribute_hidden
@@ -28,9 +34,26 @@ __pthread_mutex_unlock_usercnt (
      pthread_mutex_t *mutex,
      int decr)
 {
-  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
+  int type = PTHREAD_MUTEX_TYPE (mutex);
+  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
+    return __pthread_mutex_unlock_full (mutex, decr);
+
+  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
+      == PTHREAD_MUTEX_TIMED_NP)
+    {
+      /* Always reset the owner field.  */
+    normal:
+      mutex->__data.__owner = 0;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
+      return 0;
+    }
+  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
     {
-    case PTHREAD_MUTEX_RECURSIVE_NP:
       /* Recursive mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;
@@ -38,38 +61,231 @@ __pthread_mutex_unlock_usercnt (
       if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
+      goto normal;
+    }
+  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+    goto normal;
+  else
+    {
+      /* Error checking mutex.  */
+      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
+      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
+         || ! lll_islocked (mutex->__data.__lock))
+       return EPERM;
+      goto normal;
+    }
+}
+
+
+static int
+internal_function
+__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
+{
+  int newowner = 0;
+
+  switch (PTHREAD_MUTEX_TYPE (mutex))
+    {
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+      /* Recursive mutex.  */
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         == THREAD_GETMEM (THREAD_SELF, tid)
+         && __builtin_expect (mutex->__data.__owner
+                              == PTHREAD_MUTEX_INCONSISTENT, 0))
+       {
+         if (--mutex->__data.__count != 0)
+           /* We still hold the mutex.  */
+           return ENOTRECOVERABLE;
+
+         goto notrecoverable;
+       }
+
+      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+       return EPERM;
+
+      if (--mutex->__data.__count != 0)
+       /* We still hold the mutex.  */
+       return 0;
+
+      goto robust;
+
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         != THREAD_GETMEM (THREAD_SELF, tid)
+         || ! lll_islocked (mutex->__data.__lock))
+       return EPERM;
+
+      /* If the previous owner died and the caller did not succeed in
+        making the state consistent, mark the mutex as unrecoverable
+        and wake all waiters.  */
+      if (__builtin_expect (mutex->__data.__owner
+                           == PTHREAD_MUTEX_INCONSISTENT, 0))
+      notrecoverable:
+       newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
+
+    robust:
+      /* Remove mutex from the list.  */
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                    &mutex->__data.__list.__next);
+      DEQUEUE_MUTEX (mutex);
+
+      mutex->__data.__owner = newowner;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      lll_robust_unlock (mutex->__data.__lock,
+                        PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
-    case PTHREAD_MUTEX_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+      /* Recursive mutex.  */
+      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+       return EPERM;
+
+      if (--mutex->__data.__count != 0)
+       /* We still hold the mutex.  */
+       return 0;
+      goto continue_pi_non_robust;
+
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+      /* Recursive mutex.  */
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         == THREAD_GETMEM (THREAD_SELF, tid)
+         && __builtin_expect (mutex->__data.__owner
+                              == PTHREAD_MUTEX_INCONSISTENT, 0))
+       {
+         if (--mutex->__data.__count != 0)
+           /* We still hold the mutex.  */
+           return ENOTRECOVERABLE;
+
+         goto pi_notrecoverable;
+       }
+
+      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+       return EPERM;
+
+      if (--mutex->__data.__count != 0)
+       /* We still hold the mutex.  */
+       return 0;
+
+      goto continue_pi_robust;
+
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         != THREAD_GETMEM (THREAD_SELF, tid)
+         || ! lll_islocked (mutex->__data.__lock))
+       return EPERM;
+
+      /* If the previous owner died and the caller did not succeed in
+        making the state consistent, mark the mutex as unrecoverable
+        and wake all waiters.  */
+      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
+         && __builtin_expect (mutex->__data.__owner
+                              == PTHREAD_MUTEX_INCONSISTENT, 0))
+      pi_notrecoverable:
+       newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
+
+      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
+       {
+       continue_pi_robust:
+         /* Remove mutex from the list.
+            Note: robust PI futexes are signaled by setting bit 0.  */
+         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                  | 1));
+         DEQUEUE_MUTEX (mutex);
+       }
+
+    continue_pi_non_robust:
+      mutex->__data.__owner = newowner;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
+         || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
+                                                  THREAD_GETMEM (THREAD_SELF,
+                                                                 tid)))
+       {
+         int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+         int private = (robust
+                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+                        : PTHREAD_MUTEX_PSHARED (mutex));
+         INTERNAL_SYSCALL_DECL (__err);
+         INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
+                           __lll_private_flag (FUTEX_UNLOCK_PI, private));
+       }
+
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+      break;
+
+    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+      /* Recursive mutex.  */
+      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+       return EPERM;
+
+      if (--mutex->__data.__count != 0)
+       /* We still hold the mutex.  */
+       return 0;
+      goto pp;
+
+    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
       /* Error checking mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
-         || ! lll_mutex_islocked (mutex->__data.__lock))
+         || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
-      break;
+      /* FALLTHROUGH */
+
+    case PTHREAD_MUTEX_PP_NORMAL_NP:
+    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+      /* Always reset the owner field.  */
+    pp:
+      mutex->__data.__owner = 0;
+
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      int newval, oldval;
+      do
+       {
+         oldval = mutex->__data.__lock;
+         newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
+       }
+      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
+                                                  newval, oldval));
+
+      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
+       lll_futex_wake (&mutex->__data.__lock, 1,
+                       PTHREAD_MUTEX_PSHARED (mutex));
+
+      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+      return __pthread_tpp_change_priority (oldprio, -1);
 
     default:
       /* Correct code cannot set any other type.  */
-    case PTHREAD_MUTEX_TIMED_NP:
-    case PTHREAD_MUTEX_ADAPTIVE_NP:
-      /* Normal mutex.  Nothing special to do.  */
-      break;
+      return EINVAL;
     }
 
-  /* Always reset the owner field.  */
-  mutex->__data.__owner = 0;
-  if (decr)
-    /* One less user.  */
-    --mutex->__data.__nusers;
-
-  /* Unlock.  */
-  lll_mutex_unlock (mutex->__data.__lock);
-
   return 0;
 }
 
 
 int
-__pthread_mutex_unlock (pthread_mutex_t *mutex)
+__pthread_mutex_unlock (
+     pthread_mutex_t *mutex)
 {
   return __pthread_mutex_unlock_usercnt (mutex, 1);
 }
diff --git a/libpthread/nptl/pthread_mutexattr_getprioceiling.c b/libpthread/nptl/pthread_mutexattr_getprioceiling.c
new file mode 100644 (file)
index 0000000..29e3eb2
--- /dev/null
@@ -0,0 +1,48 @@
+/* Get priority ceiling setting from pthread_mutexattr_t.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <pthreadP.h>
+
+
+int
+pthread_mutexattr_getprioceiling (
+     const pthread_mutexattr_t *attr,
+     int *prioceiling)
+{
+  const struct pthread_mutexattr *iattr;
+  int ceiling;
+
+  iattr = (const struct pthread_mutexattr *) attr;
+
+  ceiling = ((iattr->mutexkind & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+            >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT);
+
+  if (! ceiling)
+    {
+      if (__sched_fifo_min_prio == -1)
+       __init_sched_fifo_prio ();
+      if (ceiling < __sched_fifo_min_prio)
+       ceiling = __sched_fifo_min_prio;
+    }
+
+  *prioceiling = ceiling;
+
+  return 0;
+}
diff --git a/libpthread/nptl/pthread_mutexattr_getprotocol.c b/libpthread/nptl/pthread_mutexattr_getprotocol.c
new file mode 100644 (file)
index 0000000..49c5157
--- /dev/null
@@ -0,0 +1,37 @@
+/* Get priority protocol setting from pthread_mutexattr_t.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <pthreadP.h>
+
+
+int
+pthread_mutexattr_getprotocol (
+     const pthread_mutexattr_t *attr,
+     int *protocol)
+{
+  const struct pthread_mutexattr *iattr;
+
+  iattr = (const struct pthread_mutexattr *) attr;
+
+  *protocol = ((iattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
+              >> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT);
+
+  return 0;
+}
index 12de516..e67cf15 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -29,9 +29,7 @@ pthread_mutexattr_getpshared (
 
   iattr = (const struct pthread_mutexattr *) attr;
 
-  /* We use bit 31 to signal whether the mutex is going to be
-     process-shared or not.  */
-  *pshared = ((iattr->mutexkind & 0x80000000) != 0
+  *pshared = ((iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
              ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE);
 
   return 0;
diff --git a/libpthread/nptl/pthread_mutexattr_getrobust.c b/libpthread/nptl/pthread_mutexattr_getrobust.c
new file mode 100644 (file)
index 0000000..7d495f8
--- /dev/null
@@ -0,0 +1,37 @@
+/* Copyright (C) 2005, 2010 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <pthreadP.h>
+
+
+int
+pthread_mutexattr_getrobust (
+     const pthread_mutexattr_t *attr,
+     int *robustness)
+{
+  const struct pthread_mutexattr *iattr;
+
+  iattr = (const struct pthread_mutexattr *) attr;
+
+  *robustness = ((iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0
+                ? PTHREAD_MUTEX_ROBUST_NP : PTHREAD_MUTEX_STALLED_NP);
+
+  return 0;
+}
+weak_alias (pthread_mutexattr_getrobust, pthread_mutexattr_getrobust_np)
index 0f380a9..2543486 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -29,9 +29,7 @@ pthread_mutexattr_gettype (
 
   iattr = (const struct pthread_mutexattr *) attr;
 
-  /* We use bit 31 to signal whether the mutex is going to be
-     process-shared or not.  */
-  *kind = iattr->mutexkind & ~0x80000000;
+  *kind = iattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
 
   return 0;
 }
index ce8080d..1b67284 100644 (file)
@@ -22,7 +22,8 @@
 
 
 int
-__pthread_mutexattr_init (pthread_mutexattr_t *attr)
+__pthread_mutexattr_init (
+     pthread_mutexattr_t *attr)
 {
   if (sizeof (struct pthread_mutexattr) != sizeof (pthread_mutexattr_t))
     memset (attr, '\0', sizeof (*attr));
diff --git a/libpthread/nptl/pthread_mutexattr_setprioceiling.c b/libpthread/nptl/pthread_mutexattr_setprioceiling.c
new file mode 100644 (file)
index 0000000..3a13fb8
--- /dev/null
@@ -0,0 +1,47 @@
+/* Change priority ceiling setting in pthread_mutexattr_t.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <pthreadP.h>
+
+
+int
+pthread_mutexattr_setprioceiling (
+     pthread_mutexattr_t *attr,
+     int prioceiling)
+{
+  if (__sched_fifo_min_prio == -1)
+    __init_sched_fifo_prio ();
+
+  if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
+      || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
+      || __builtin_expect ((prioceiling
+                           & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
+                              >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
+                          != prioceiling, 0))
+    return EINVAL;
+
+  struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;
+
+  iattr->mutexkind = ((iattr->mutexkind & ~PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+                     | (prioceiling << PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT));
+
+  return 0;
+}
diff --git a/libpthread/nptl/pthread_mutexattr_setprotocol.c b/libpthread/nptl/pthread_mutexattr_setprotocol.c
new file mode 100644 (file)
index 0000000..1ffcaf6
--- /dev/null
@@ -0,0 +1,41 @@
+/* Change priority protocol setting in pthread_mutexattr_t.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <pthreadP.h>
+
+
+int
+pthread_mutexattr_setprotocol (
+     pthread_mutexattr_t *attr,
+     int protocol)
+{
+  if (protocol != PTHREAD_PRIO_NONE
+      && protocol != PTHREAD_PRIO_INHERIT
+      && __builtin_expect (protocol != PTHREAD_PRIO_PROTECT, 0))
+    return EINVAL;
+
+  struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;
+
+  iattr->mutexkind = ((iattr->mutexkind & ~PTHREAD_MUTEXATTR_PROTOCOL_MASK)
+                     | (protocol << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT));
+
+  return 0;
+}
index 74df1bf..d84a92e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -34,12 +34,10 @@ pthread_mutexattr_setpshared (
 
   iattr = (struct pthread_mutexattr *) attr;
 
-  /* We use bit 31 to signal whether the mutex is going to be
-     process-shared or not.  */
   if (pshared == PTHREAD_PROCESS_PRIVATE)
-    iattr->mutexkind &= ~0x80000000;
+    iattr->mutexkind &= ~PTHREAD_MUTEXATTR_FLAG_PSHARED;
   else
-    iattr->mutexkind |= 0x80000000;
+    iattr->mutexkind |= PTHREAD_MUTEXATTR_FLAG_PSHARED;
 
   return 0;
 }
diff --git a/libpthread/nptl/pthread_mutexattr_setrobust.c b/libpthread/nptl/pthread_mutexattr_setrobust.c
new file mode 100644 (file)
index 0000000..fe94fd2
--- /dev/null
@@ -0,0 +1,44 @@
+/* Copyright (C) 2005, 2010 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <pthreadP.h>
+
+
+int
+pthread_mutexattr_setrobust (
+     pthread_mutexattr_t *attr,
+     int robustness)
+{
+  if (robustness != PTHREAD_MUTEX_STALLED_NP
+      && __builtin_expect (robustness != PTHREAD_MUTEX_ROBUST_NP, 0))
+    return EINVAL;
+
+  struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;
+
+  /* We use bit 30 to signal whether the mutex is going to be
+     robust or not.  */
+  if (robustness == PTHREAD_MUTEX_STALLED_NP)
+    iattr->mutexkind &= ~PTHREAD_MUTEXATTR_FLAG_ROBUST;
+  else
+    iattr->mutexkind |= PTHREAD_MUTEXATTR_FLAG_ROBUST;
+
+  return 0;
+}
+weak_alias (pthread_mutexattr_setrobust, pthread_mutexattr_setrobust_np)
index aaeec81..f86dd63 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -33,9 +33,7 @@ __pthread_mutexattr_settype (
 
   iattr = (struct pthread_mutexattr *) attr;
 
-  /* We use bit 31 to signal whether the mutex is going to be
-     process-shared or not.  */
-  iattr->mutexkind = (iattr->mutexkind & 0x80000000) | kind;
+  iattr->mutexkind = (iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_BITS) | kind;
 
   return 0;
 }
index 8055960..aab832e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -18,6 +18,7 @@
    02111-1307 USA.  */
 
 #include "pthreadP.h"
+#include <bits/kernel-features.h>
 
 
 static const struct pthread_rwlockattr default_attr =
@@ -36,15 +37,36 @@ __pthread_rwlock_init (
 
   iattr = ((const struct pthread_rwlockattr *) attr) ?: &default_attr;
 
-  rwlock->__data.__lock = 0;
+  memset (rwlock, '\0', sizeof (*rwlock));
+
   rwlock->__data.__flags
     = iattr->lockkind == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP;
-  rwlock->__data.__nr_readers = 0;
-  rwlock->__data.__writer = 0;
-  rwlock->__data.__readers_wakeup = 0;
-  rwlock->__data.__writer_wakeup = 0;
-  rwlock->__data.__nr_readers_queued = 0;
-  rwlock->__data.__nr_writers_queued = 0;
+
+  /* The __SHARED field is computed to minimize the work that needs to
+     be done while handling the futex.  There are two inputs: the
+     availability of private futexes and whether the rwlock is shared
+     or private.  Unfortunately the value of a private rwlock is
+     fixed: it must be zero.  The PRIVATE_FUTEX flag has the value
+     0x80 in case private futexes are available and zero otherwise.
+     This leads to the following table:
+
+                |     pshared     |     result
+                | shared  private | shared  private |
+     ------------+-----------------+-----------------+
+     !avail 0    |     0       0   |     0       0   |
+      avail 0x80 |  0x80       0   |     0    0x80   |
+
+     If the pshared value is in locking functions XORed with avail
+     we get the expected result.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+  rwlock->__data.__shared = (iattr->pshared == PTHREAD_PROCESS_PRIVATE
+                            ? 0 : FUTEX_PRIVATE_FLAG);
+#else
+  rwlock->__data.__shared = (iattr->pshared == PTHREAD_PROCESS_PRIVATE
+                            ? 0
+                            : THREAD_GETMEM (THREAD_SELF,
+                                             header.private_futex));
+#endif
 
   return 0;
 }
index 0a9c815..30e6dc5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 int
-__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
+__pthread_rwlock_tryrdlock (
+     pthread_rwlock_t *rwlock)
 {
   int result = EBUSY;
 
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   if (rwlock->__data.__writer == 0
       && (rwlock->__data.__nr_writers_queued == 0
-         || rwlock->__data.__flags == 0))
+         || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
     {
       if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
        {
@@ -42,7 +43,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
        result = 0;
     }
 
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
index 47150c1..a6f9ee4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 int
-__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
+__pthread_rwlock_trywrlock (
+     pthread_rwlock_t *rwlock)
 {
   int result = EBUSY;
 
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
     {
@@ -35,7 +36,7 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
       result = 0;
     }
 
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
index 55dfefc..9b33b3e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,9 +26,9 @@
 
 int
 __pthread_setschedparam (
-        pthread_t threadid,
-        int policy,
-        const struct sched_param *param)
+     pthread_t threadid,
+     int policy,
+     const struct sched_param *param)
 {
   struct pthread *pd = (struct pthread *) threadid;
 
@@ -39,10 +39,23 @@ __pthread_setschedparam (
 
   int result = 0;
 
-  lll_lock (pd->lock);
+  lll_lock (pd->lock, LLL_PRIVATE);
+
+  struct sched_param p;
+  const struct sched_param *orig_param = param;
+
+  /* If the thread should have higher priority because of some
+     PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
+  if (__builtin_expect (pd->tpp != NULL, 0)
+      && pd->tpp->priomax > param->sched_priority)
+    {
+      p = *param;
+      p.sched_priority = pd->tpp->priomax;
+      param = &p;
+    }
 
   /* Try to set the scheduler information.  */
-  if (__builtin_expect (sched_setscheduler (pd->tid, policy,
+  if (__builtin_expect (__sched_setscheduler (pd->tid, policy,
                                              param) == -1, 0))
     result = errno;
   else
@@ -50,11 +63,11 @@ __pthread_setschedparam (
       /* We succeeded changing the kernel information.  Reflect this
         change in the thread descriptor.  */
       pd->schedpolicy = policy;
-      memcpy (&pd->schedparam, param, sizeof (struct sched_param));
+      memcpy (&pd->schedparam, orig_param, sizeof (struct sched_param));
       pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET;
     }
 
-  lll_unlock (pd->lock);
+  lll_unlock (pd->lock, LLL_PRIVATE);
 
   return result;
 }
index d5e30e5..7951460 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,7 +26,9 @@
 
 
 int
-pthread_setschedprio (pthread_t threadid, int prio)
+pthread_setschedprio (
+     pthread_t threadid,
+     int prio)
 {
   struct pthread *pd = (struct pthread *) threadid;
 
@@ -39,7 +41,12 @@ pthread_setschedprio (pthread_t threadid, int prio)
   struct sched_param param;
   param.sched_priority = prio;
 
-  lll_lock (pd->lock);
+  lll_lock (pd->lock, LLL_PRIVATE);
+
+  /* If the thread should have higher priority because of some
+     PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
+  if (__builtin_expect (pd->tpp != NULL, 0) && pd->tpp->priomax > prio)
+    param.sched_priority = pd->tpp->priomax;
 
   /* Try to set the scheduler information.  */
   if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0))
@@ -48,11 +55,12 @@ pthread_setschedprio (pthread_t threadid, int prio)
     {
       /* We succeeded changing the kernel information.  Reflect this
         change in the thread descriptor.  */
+      param.sched_priority = prio;
       memcpy (&pd->schedparam, &param, sizeof (struct sched_param));
       pd->flags |= ATTR_FLAG_SCHED_SET;
     }
 
-  lll_unlock (pd->lock);
+  lll_unlock (pd->lock, LLL_PRIVATE);
 
   return result;
 }
index 8761eb9..8e24be7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -52,8 +52,8 @@ __pthread_setspecific (
     }
   else
     {
-      if (KEY_UNUSED ((seq = __pthread_keys[key].seq))
-         || key >= PTHREAD_KEYS_MAX)
+      if (key >= PTHREAD_KEYS_MAX
+         || KEY_UNUSED ((seq = __pthread_keys[key].seq)))
        /* Not valid.  */
        return EINVAL;
 
index ce2cf3f..4098a73 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,7 +19,7 @@
 
 #include <errno.h>
 #include <stdlib.h>
-#include "atomic.h"
+#include <atomic.h>
 #include "pthreadP.h"
 
 
@@ -32,9 +32,9 @@ cleanup (void *arg)
 
 int
 pthread_timedjoin_np (
-        pthread_t threadid,
-        void **thread_return,
-        const struct timespec *abstime)
+     pthread_t threadid,
+     void **thread_return,
+     const struct timespec *abstime)
 {
   struct pthread *self;
   struct pthread *pd = (struct pthread *) threadid;
index 049cb5e..5746a9e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <errno.h>
 #include <stdlib.h>
 
-#include "atomic.h"
+#include <atomic.h>
 #include "pthreadP.h"
 
 
 int
-pthread_tryjoin_np (pthread_t threadid, void **thread_return)
+pthread_tryjoin_np (
+     pthread_t threadid,
+     void **thread_return)
 {
   struct pthread *self;
   struct pthread *pd = (struct pthread *) threadid;
index 3752989..6a4d18b 100644 (file)
@@ -17,7 +17,6 @@
    02111-1307 USA.  */
 
 #include <features.h>
-
 #include <tls.h>
 #include <resolv.h>
 
index a2bcc71..dcf30f7 100644 (file)
@@ -41,12 +41,13 @@ walker (const void *inodep, const VISIT which, const int depth)
 
 
 int
-sem_close (sem_t *sem)
+sem_close (
+     sem_t *sem)
 {
   int result = 0;
 
   /* Get the lock.  */
-  lll_lock (__sem_mappings_lock);
+  lll_lock (__sem_mappings_lock, LLL_PRIVATE);
 
   /* Locate the entry for the mapping the caller provided.  */
   rec = NULL;
@@ -74,7 +75,7 @@ sem_close (sem_t *sem)
     }
 
   /* Release the lock.  */
-  lll_unlock (__sem_mappings_lock);
+  lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
 
   return result;
 }
index 2e612c1..6362c0e 100644 (file)
@@ -22,7 +22,8 @@
 
 
 int
-__new_sem_destroy (sem_t *sem)
+__new_sem_destroy (
+     sem_t *sem)
 {
   /* XXX Check for valid parameter.  */
 
index 5e6c8df..928026f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,11 +26,11 @@ __new_sem_getvalue (
      sem_t *sem,
      int *sval)
 {
-  struct sem *isem = (struct sem *) sem;
+  struct new_sem *isem = (struct new_sem *) sem;
 
   /* XXX Check for valid SEM parameter.  */
 
-  *sval = isem->count;
+  *sval = isem->value;
 
   return 0;
 }
index 1439bd0..0a224f3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -21,6 +21,7 @@
 #include <semaphore.h>
 #include <lowlevellock.h>
 #include "semaphoreP.h"
+#include <bits/kernel-features.h>
 
 
 int
@@ -37,13 +38,18 @@ __new_sem_init (
     }
 
   /* Map to the internal type.  */
-  struct sem *isem = (struct sem *) sem;
-
-  /* Use the value the user provided.  */
-  isem->count = value;
-
-  /* We can completely ignore the PSHARED parameter since inter-process
-     use needs no special preparation.  */
+  struct new_sem *isem = (struct new_sem *) sem;
+
+  /* Use the values the user provided.  */
+  isem->value = value;
+#ifdef __ASSUME_PRIVATE_FUTEX
+  isem->private = pshared ? 0 : FUTEX_PRIVATE_FLAG;
+#else
+  isem->private = pshared ? 0 : THREAD_GETMEM (THREAD_SELF,
+                                              header.private_futex);
+#endif
+
+  isem->nwaiters = 0;
 
   return 0;
 }
index 25389f0..8ded82c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 
 /* Compatibility defines. */
-#define __endmntent                    endmntent
-#define __fxstat64(vers, fd, buf)      fstat64(fd, buf)
-#define __getmntent_r                  getmntent_r
-#define __setmntent                    setmntent
-#define __statfs                       statfs
-#define __libc_close                   close
-#define __libc_open                    open
-#define __libc_write                   write
+#define __endmntent                    endmntent
+#define __fxstat64(vers, fd, buf)      fstat64(fd, buf)
+#define __getmntent_r                  getmntent_r
+#define __setmntent                    setmntent
+#define __statfs                       statfs
+#define __libc_close                   close
+#define __libc_open                    open
+#define __libc_write                   write
+
 
 /* Information about the mount point.  */
 struct mountpoint_info mountpoint attribute_hidden;
@@ -157,7 +158,7 @@ __sem_search (const void *a, const void *b)
 void *__sem_mappings attribute_hidden;
 
 /* Lock to protect the search tree.  */
-lll_lock_t __sem_mappings_lock = LLL_LOCK_INITIALIZER;
+int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
 
 
 /* Search for existing mapping and if possible add the one provided.  */
@@ -176,7 +177,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
 #endif
     {
       /* Get the lock.  */
-      lll_lock (__sem_mappings_lock);
+      lll_lock (__sem_mappings_lock, LLL_PRIVATE);
 
       /* Search for an existing mapping given the information we have.  */
       struct inuse_sem *fake;
@@ -225,7 +226,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
        }
 
       /* Release the lock.  */
-      lll_unlock (__sem_mappings_lock);
+      lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
     }
 
   if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED)
@@ -317,24 +318,28 @@ sem_open (const char *name, int oflag, ...)
        }
 
       /* Create the initial file content.  */
-      sem_t initsem;
+      union
+      {
+       sem_t initsem;
+       struct new_sem newsem;
+      } sem;
 
-      struct sem *iinitsem = (struct sem *) &initsem;
-      iinitsem->count = value;
+      sem.newsem.value = value;
+      sem.newsem.private = 0;
+      sem.newsem.nwaiters = 0;
 
       /* Initialize the remaining bytes as well.  */
-      memset ((char *) &initsem + sizeof (struct sem), '\0',
-             sizeof (sem_t) - sizeof (struct sem));
+      memset ((char *) &sem.initsem + sizeof (struct new_sem), '\0',
+             sizeof (sem_t) - sizeof (struct new_sem));
 
       tmpfname = (char *) alloca (mountpoint.dirlen + 6 + 1);
-      char *xxxxxx = mempcpy (tmpfname, mountpoint.dir, mountpoint.dirlen);
-      strcpy (xxxxxx, "XXXXXX");
+      mempcpy (tmpfname, mountpoint.dir, mountpoint.dirlen);
 
       fd = __gen_tempname (tmpfname, __GT_FILE, mode);
       if (fd == -1)
-          return SEM_FAILED;
+        return SEM_FAILED;
 
-      if (TEMP_FAILURE_RETRY (__libc_write (fd, &initsem, sizeof (sem_t)))
+      if (TEMP_FAILURE_RETRY (__libc_write (fd, &sem.initsem, sizeof (sem_t)))
          == sizeof (sem_t)
          /* Map the sem_t structure from the file.  */
          && (result = (sem_t *) mmap (NULL, sizeof (sem_t),
index e52a08d..beed02e 100644 (file)
@@ -26,7 +26,8 @@
 
 
 int
-sem_unlink (const char *name)
+sem_unlink (
+     const char *name)
 {
   char *fname;
   size_t namelen;
index d14ea92..7d6fd25 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once attribute_hidden;
 extern void *__sem_mappings attribute_hidden;
 
 /* Lock to protect the search tree.  */
-extern lll_lock_t __sem_mappings_lock;
+extern int __sem_mappings_lock attribute_hidden;
 
 
 /* Initializer for mountpoint.  */
@@ -60,8 +60,10 @@ extern int __sem_search (const void *a, const void *b) attribute_hidden;
 
 /* Prototypes of functions with multiple interfaces.  */
 extern int __new_sem_init (sem_t *sem, int pshared, unsigned int value);
+extern int __old_sem_init (sem_t *sem, int pshared, unsigned int value);
 extern int __new_sem_destroy (sem_t *sem);
 extern int __new_sem_post (sem_t *sem);
 extern int __new_sem_wait (sem_t *sem);
+extern int __old_sem_wait (sem_t *sem);
 extern int __new_sem_trywait (sem_t *sem);
 extern int __new_sem_getvalue (sem_t *sem, int *sval);
index b8efd59..29200a1 100644 (file)
@@ -21,7 +21,6 @@
 #define _TLS_H 1
 
 #ifndef __ASSEMBLER__
-#include <dl-sysdep.h>
 
 # include <stdbool.h>
 # include <stddef.h>
index 7f95daa..0600e17 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Low level locking macros used in NPTL implementation.  Stub version.
+   Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <atomic.h>
 
 
-/* Implement generic mutex.  Basic futex syscall support is required:
-
-     lll_futex_wait(futex, value) - call sys_futex with FUTEX_WAIT
-                                   and third parameter VALUE
-
-     lll_futex_wake(futex, value) - call sys_futex with FUTEX_WAKE
-                                   and third parameter VALUE
-*/
-
-
 /* Mutex lock counter:
    bit 31 clear means unlocked;
    bit 31 set means locked.
@@ -65,7 +56,9 @@ __generic_mutex_lock (int *mutex)
       if (v >= 0)
        continue;
 
-      lll_futex_wait (mutex, v);
+      lll_futex_wait (mutex, v,
+                     // XYZ check mutex flag
+                     LLL_SHARED);
     }
 }
 
@@ -81,7 +74,9 @@ __generic_mutex_unlock (int *mutex)
 
   /* There are other threads waiting for this mutex, wake one of them
      up.  */
-  lll_futex_wake (mutex, 1);
+  lll_futex_wake (mutex, 1,
+                 // XYZ check mutex flag
+                 LLL_SHARED);
 }
 
 
index a6b1cf4..34cd525 100644 (file)
@@ -29,7 +29,8 @@
 
 
 int
-pthread_spin_lock (pthread_spinlock_t *lock)
+pthread_spin_lock (
+     pthread_spinlock_t *lock)
 {
   __asm__ ("\n"
        "1:\t" LOCK_PREFIX "decl %0\n\t"
index 4e0444b..69f9deb 100644 (file)
@@ -11,3 +11,7 @@ SYSINFO_OFFSET                offsetof (tcbhead_t, sysinfo)
 CLEANUP                        offsetof (struct pthread, cleanup)
 CLEANUP_PREV           offsetof (struct _pthread_cleanup_buffer, __prev)
 MUTEX_FUTEX            offsetof (pthread_mutex_t, __data.__lock)
+POINTER_GUARD          offsetof (tcbhead_t, pointer_guard)
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX          offsetof (tcbhead_t, private_futex)
+#endif
index 52bde9e..5f27d8f 100644 (file)
@@ -1,5 +1,5 @@
 /* Definition for thread-local data handling.  nptl/i386 version.
-   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -26,6 +26,8 @@
 # include <stdint.h>
 # include <stdlib.h>
 # include <list.h>
+# include <sysdep.h>
+# include <bits/kernel-features.h>
 
 
 /* Type for the dtv.  */
@@ -49,6 +51,15 @@ typedef struct
   int multiple_threads;
   uintptr_t sysinfo;
   uintptr_t stack_guard;
+  uintptr_t pointer_guard;
+  int gscope_flag;
+#ifndef __ASSUME_PRIVATE_FUTEX
+  int private_futex;
+#else
+  int __unused1;
+#endif
+  /* Reservation of some values for the TM ABI.  */
+  void *__private_tm[5];
 } tcbhead_t;
 
 # define TLS_MULTIPLE_THREADS_IN_TCB 1
@@ -64,7 +75,8 @@ typedef struct
 #define HAVE_TLS_MODEL_ATTRIBUTE 1
 
 /* Signal that TLS support is available.  */
-#define USE_TLS        1
+#define USE_TLS        1
+
 
 /* Alignment requirement for the stack.  For IA-32 this is governed by
    the SSE memory functions.  */
@@ -99,6 +111,9 @@ union user_desc_init
 };
 
 
+/* Get the thread descriptor definition.  */
+# include <descr.h>
+
 /* This is the size of the initial TCB.  Can't be just sizeof (tcbhead_t),
    because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
    struct pthread even when not linked with -lpthread.  */
@@ -113,9 +128,6 @@ union user_desc_init
 /* Alignment requirements for the TCB.  */
 # define TLS_TCB_ALIGN __alignof__ (struct pthread)
 
-/* Get the thread descriptor definition. */
-#include <descr.h>
-
 /* The TCB can have any size and the memory following the address the
    thread pointer points to is unspecified.  Allocate the TCB there.  */
 # define TLS_TCB_AT_TP 1
@@ -220,7 +232,7 @@ union user_desc_init
      _segdescr.vals[3] = 0x51;                                               \
                                                                              \
      /* Install the TLS.  */                                                 \
-     __asm__ volatile (TLS_LOAD_EBX                                          \
+     __asm__ volatile (TLS_LOAD_EBX                                              \
                   "int $0x80\n\t"                                            \
                   TLS_LOAD_EBX                                               \
                   : "=a" (_result), "=m" (_segdescr.desc.entry_number)       \
@@ -256,7 +268,7 @@ union user_desc_init
    do not get optimized away.  */
 # define THREAD_SELF \
   ({ struct pthread *__self;                                                 \
-     __asm__ ("movl %%gs:%c1,%0" : "=r" (__self)                             \
+     __asm__ ("movl %%gs:%c1,%0" : "=r" (__self)                                   \
          : "i" (offsetof (struct pthread, header.self)));                    \
      __self;})
 
@@ -270,11 +282,11 @@ union user_desc_init
 # define THREAD_GETMEM(descr, member) \
   ({ __typeof (descr->member) __value;                                       \
      if (sizeof (__value) == 1)                                                      \
-       __asm__ volatile ("movb %%gs:%P2,%b0"                                 \
+       __asm__ volatile ("movb %%gs:%P2,%b0"                               \
                     : "=q" (__value)                                         \
                     : "0" (0), "i" (offsetof (struct pthread, member)));     \
      else if (sizeof (__value) == 4)                                         \
-       __asm__ volatile ("movl %%gs:%P1,%0"                                  \
+       __asm__ volatile ("movl %%gs:%P1,%0"                                        \
                     : "=r" (__value)                                         \
                     : "i" (offsetof (struct pthread, member)));              \
      else                                                                    \
@@ -297,12 +309,12 @@ union user_desc_init
 # define THREAD_GETMEM_NC(descr, member, idx) \
   ({ __typeof (descr->member[0]) __value;                                    \
      if (sizeof (__value) == 1)                                                      \
-       __asm__ volatile ("movb %%gs:%P2(%3),%b0"                             \
+       __asm__ volatile ("movb %%gs:%P2(%3),%b0"                                     \
                     : "=q" (__value)                                         \
                     : "0" (0), "i" (offsetof (struct pthread, member[0])),   \
                     "r" (idx));                                              \
      else if (sizeof (__value) == 4)                                         \
-       __asm__ volatile ("movl %%gs:%P1(,%2,4),%0"                           \
+       __asm__ volatile ("movl %%gs:%P1(,%2,4),%0"                                   \
                     : "=r" (__value)                                         \
                     : "i" (offsetof (struct pthread, member[0])),            \
                       "r" (idx));                                            \
@@ -350,7 +362,7 @@ union user_desc_init
 /* Set member of the thread descriptor directly.  */
 # define THREAD_SETMEM_NC(descr, member, idx, value) \
   ({ if (sizeof (descr->member[0]) == 1)                                     \
-       __asm__ volatile ("movb %b0,%%gs:%P1(%2)" :                           \
+       __asm__ volatile ("movb %b0,%%gs:%P1(%2)" :                                   \
                     : "iq" (value),                                          \
                       "i" (offsetof (struct pthread, member)),               \
                       "r" (idx));                                            \
@@ -366,7 +378,7 @@ union user_desc_init
              4 or 8.  */                                                     \
           abort ();                                                          \
                                                                              \
-        __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t"                   \
+        __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t"                           \
                       "movl %%edx,%%gs:4+%P1(,%2,8)" :                       \
                       : "A" (value),                                         \
                         "i" (offsetof (struct pthread, member)),             \
@@ -389,6 +401,17 @@ union user_desc_init
      __ret; })
 
 
+/* Atomic logical and.  */
+#define THREAD_ATOMIC_AND(descr, member, val) \
+  (void) ({ if (sizeof ((descr)->member) == 4)                               \
+             __asm__ volatile (LOCK_PREFIX "andl %1, %%gs:%P0"               \
+                           :: "i" (offsetof (struct pthread, member)),       \
+                              "ir" (val));                                   \
+           else                                                              \
+             /* Not necessary for other sizes in the moment.  */             \
+             abort (); })
+
+
 /* Atomic set bit.  */
 #define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
   (void) ({ if (sizeof ((descr)->member) == 4)                               \
@@ -424,6 +447,34 @@ union user_desc_init
    = THREAD_GETMEM (THREAD_SELF, header.stack_guard))
 
 
+/* Set the pointer guard field in the TCB head.  */
+#define THREAD_SET_POINTER_GUARD(value) \
+  THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
+#define THREAD_COPY_POINTER_GUARD(descr) \
+  ((descr)->header.pointer_guard                                             \
+   = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))
+
+
+/* Get and set the global scope generation counter in the TCB head.  */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED   1
+#define THREAD_GSCOPE_FLAG_WAIT   2
+#define THREAD_GSCOPE_RESET_FLAG() \
+  do                                                                         \
+    { int __res;                                                             \
+      __asm__ volatile ("xchgl %0, %%gs:%P1"                                 \
+                   : "=r" (__res)                                            \
+                   : "i" (offsetof (struct pthread, header.gscope_flag)),    \
+                     "0" (THREAD_GSCOPE_FLAG_UNUSED));                       \
+      if (__res == THREAD_GSCOPE_FLAG_WAIT)                                  \
+       lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE);    \
+    }                                                                        \
+  while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+  THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
+#define THREAD_GSCOPE_WAIT() \
+  GL(dl_wait_lookup_done) ()
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* tls.h */
index 3962edb..8ac133d 100644 (file)
@@ -14,3 +14,7 @@ MULTIPLE_THREADS_OFFSET               thread_offsetof (header.multiple_threads)
 #endif
 PID                            thread_offsetof (pid)
 TID                            thread_offsetof (tid)
+POINTER_GUARD                  (offsetof (tcbhead_t, pointer_guard) - TLS_TCB_OFFSET - sizeof (tcbhead_t))
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX_OFFSET           thread_offsetof (header.private_futex)
+#endif
index 1157116..ce5559e 100644 (file)
@@ -1,5 +1,5 @@
 /* Definition for thread-local data handling.  NPTL/PowerPC version.
-   Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -47,7 +47,7 @@ typedef union dtv
 #endif
 
 /* Signal that TLS support is available.  */
-# define USE_TLS       1
+# define USE_TLS       1
 
 #ifndef __ASSEMBLER__
 
@@ -64,9 +64,11 @@ typedef union dtv
 # include <nptl/descr.h>
 
 /* The stack_guard is accessed directly by GCC -fstack-protector code,
-   so it is a part of public ABI.  The dtv field is private.  */
+   so it is a part of public ABI.  The dtv and pointer_guard fields
+   are private.  */
 typedef struct
 {
+  uintptr_t pointer_guard;
   uintptr_t stack_guard;
   dtv_t *dtv;
 } tcbhead_t;
@@ -164,10 +166,44 @@ register void *__thread_register __asm__ ("r13");
      = ((tcbhead_t *) ((char *) __thread_register                            \
                       - TLS_TCB_OFFSET))[-1].stack_guard)
 
+/* Set the stack guard field in TCB head.  */
+# define THREAD_GET_POINTER_GUARD() \
+    (((tcbhead_t *) ((char *) __thread_register                                      \
+                    - TLS_TCB_OFFSET))[-1].pointer_guard)
+# define THREAD_SET_POINTER_GUARD(value) \
+    (THREAD_GET_POINTER_GUARD () = (value))
+# define THREAD_COPY_POINTER_GUARD(descr) \
+    (((tcbhead_t *) ((char *) (descr)                                        \
+                    + TLS_PRE_TCB_SIZE))[-1].pointer_guard                   \
+     = THREAD_GET_POINTER_GUARD())
+
 /* l_tls_offset == 0 is perfectly valid on PPC, so we have to use some
    different value to mean unset l_tls_offset.  */
 # define NO_TLS_OFFSET         -1
 
+/* Get and set the global scope generation counter in struct pthread.  */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED   1
+#define THREAD_GSCOPE_FLAG_WAIT   2
+#define THREAD_GSCOPE_RESET_FLAG() \
+  do                                                                        \
+    { int __res                                                                     \
+       = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag,             \
+                              THREAD_GSCOPE_FLAG_UNUSED);                   \
+      if (__res == THREAD_GSCOPE_FLAG_WAIT)                                 \
+       lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE);   \
+    }                                                                       \
+  while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+  do                                                                        \
+    {                                                                       \
+      THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;            \
+      atomic_write_barrier ();                                              \
+    }                                                                       \
+  while (0)
+#define THREAD_GSCOPE_WAIT() \
+  GL(dl_wait_lookup_done) ()
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* tls.h */
index 0d10979..9497789 100644 (file)
@@ -18,7 +18,7 @@ libpthread_CSRC = pthread_barrier_wait.c pthread_cond_broadcast.c     \
                  pthread_rwlock_wrlock.c pthread_sigmask.c             \
                  pthread_spin_destroy.c pthread_spin_init.c            \
                  pthread_spin_unlock.c pt-sigfillset.c \
-                 pt-longjmp.c
+                 pt-longjmp.c tpp.c
 
 
 ifeq ($(TARGET_ARCH),i386)
@@ -43,6 +43,13 @@ SH_PTHREAD_EXCLUDE_LIST = pthread_spin_unlock.c pthread_spin_init.c \
 libpthread_CSRC := $(filter-out $(SH_PTHREAD_EXCLUDE_LIST),$(libpthread_CSRC))
 endif
 
+ifeq ($(TARGET_ARCH),sparc)
+SPARC_PTHREAD_EXCLUDE_LIST = pthread_barrier_init.c pthread_barrier_wait.c \
+               pthread_barrier_destroy.c
+
+libpthread_CSRC := $(filter-out $(SPARC_PTHREAD_EXCLUDE_LIST),$(libpthread_CSRC))
+endif
+
 ifeq ($(TARGET_ARCH),x86_64)
 X64_PTHREAD_EXCLUDE_LIST = pthread_spin_unlock.c pthread_spin_init.c \
                pthread_barrier_wait.c pthread_cond_broadcast.c \
index 35224ec..f13c3a3 100644 (file)
@@ -1,5 +1,5 @@
 /* Determine whether block of given size can be allocated on the stack or not.
-   Copyright (C) 2002 Free Software Foundation, Inc.
+   Copyright (C) 2002, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,8 @@
 #include <limits.h>
 
 
-extern inline int
+extern int
+__always_inline
 __libc_use_alloca (size_t size)
 {
   return (__builtin_expect (size <= PTHREAD_STACK_MIN / 4, 1)
index c59e3a0..70fe676 100644 (file)
@@ -1,5 +1,5 @@
 /* libc-internal interface for mutex locks.  NPTL version.
-   Copyright (C) 1996-2001, 2002, 2003, 2005 Free Software Foundation, Inc.
+   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -150,13 +150,17 @@ typedef pthread_key_t __libc_key_t;
 
 /* Call thread functions through the function pointer table.  */
 #if defined SHARED && !defined NOT_IN_libc
-# define PTF(NAME) __libc_pthread_functions.ptr_##NAME
+# define PTFAVAIL(NAME) __libc_pthread_functions_init
 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
-  (PTF(FUNC) != NULL ? PTF(FUNC) ARGS : ELSE)
+  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
+# define __libc_ptf_call_always(FUNC, ARGS) \
+  PTHFCT_CALL (ptr_##FUNC, ARGS)
 #else
-# define PTF(NAME) NAME
+# define PTFAVAIL(NAME) (NAME != NULL)
 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
   __libc_maybe_call (FUNC, ARGS, ELSE)
+# define __libc_ptf_call_always(FUNC, ARGS) \
+  FUNC ARGS
 #endif
 
 
@@ -168,8 +172,15 @@ typedef pthread_key_t __libc_key_t;
 # define __libc_lock_init(NAME) \
   __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
 #endif
-#define __libc_rwlock_init(NAME) \
+#if defined SHARED && !defined NOT_IN_libc
+/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
+   inefficient.  */
+# define __libc_rwlock_init(NAME) \
+  (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
+#else
+# define __libc_rwlock_init(NAME) \
   __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
+#endif
 
 /* Same as last but this time we initialize a recursive mutex.  */
 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
@@ -210,8 +221,12 @@ typedef pthread_key_t __libc_key_t;
 # define __libc_lock_fini(NAME) \
   __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
 #endif
-#define __libc_rwlock_fini(NAME) \
+#if defined SHARED && !defined NOT_IN_libc
+# define __libc_rwlock_fini(NAME) ((void) 0)
+#else
+# define __libc_rwlock_fini(NAME) \
   __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
+#endif
 
 /* Finalize recursive named lock.  */
 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
@@ -224,7 +239,7 @@ typedef pthread_key_t __libc_key_t;
 /* Lock the named lock variable.  */
 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
 # define __libc_lock_lock(NAME) \
-  ({ lll_lock (NAME); 0; })
+  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
 #else
 # define __libc_lock_lock(NAME) \
   __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
@@ -241,7 +256,7 @@ typedef pthread_key_t __libc_key_t;
     void *self = THREAD_SELF;                                                \
     if ((NAME).owner != self)                                                \
       {                                                                              \
-       lll_lock ((NAME).lock);                                               \
+       lll_lock ((NAME).lock, LLL_PRIVATE);                                  \
        (NAME).owner = self;                                                  \
       }                                                                              \
     ++(NAME).cnt;                                                            \
@@ -295,7 +310,7 @@ typedef pthread_key_t __libc_key_t;
 /* Unlock the named lock variable.  */
 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
 # define __libc_lock_unlock(NAME) \
-  lll_unlock (NAME)
+  lll_unlock (NAME, LLL_PRIVATE)
 #else
 # define __libc_lock_unlock(NAME) \
   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
@@ -311,7 +326,7 @@ typedef pthread_key_t __libc_key_t;
     if (--(NAME).cnt == 0)                                                   \
       {                                                                              \
        (NAME).owner = NULL;                                                  \
-       lll_unlock ((NAME).lock);                                             \
+       lll_unlock ((NAME).lock, LLL_PRIVATE);                                \
       }                                                                              \
   } while (0)
 #else
@@ -353,8 +368,9 @@ typedef pthread_key_t __libc_key_t;
 /* Call handler iff the first call.  */
 #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
   do {                                                                       \
-    if (PTF(__pthread_once) != NULL)                                         \
-      PTF(__pthread_once) (&(ONCE_CONTROL), INIT_FUNCTION);                  \
+    if (PTFAVAIL (__pthread_once))                                           \
+      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),                      \
+                                              INIT_FUNCTION));               \
     else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                          \
       INIT_FUNCTION ();                                                              \
       (ONCE_CONTROL) |= 2;                                                   \
@@ -380,9 +396,10 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
   { struct _pthread_cleanup_buffer _buffer;                                  \
     int _avail;                                                                      \
     if (DOIT) {                                                                      \
-      _avail = PTF(_pthread_cleanup_push_defer) != NULL;                     \
+      _avail = PTFAVAIL (_pthread_cleanup_push_defer);                       \
       if (_avail) {                                                          \
-       PTF(_pthread_cleanup_push_defer) (&_buffer, FCT, ARG);                \
+       __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT,  \
+                                                             ARG));          \
       } else {                                                               \
        _buffer.__routine = (FCT);                                            \
        _buffer.__arg = (ARG);                                                \
@@ -394,7 +411,7 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
 /* End critical region with cleanup.  */
 #define __libc_cleanup_region_end(DOIT) \
     if (_avail) {                                                            \
-      PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT);                    \
+      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
     } else if (DOIT)                                                         \
       _buffer.__routine (_buffer.__arg);                                     \
   }
@@ -402,16 +419,13 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
 /* Sometimes we have to exit the block in the middle.  */
 #define __libc_cleanup_end(DOIT) \
     if (_avail) {                                                            \
-      PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT);                    \
+      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
     } else if (DOIT)                                                         \
       _buffer.__routine (_buffer.__arg)
 
 
 /* Normal cleanup handling, based on C cleanup attribute.  */
 __extern_inline void
-__libc_cleanup_routine (struct __pthread_cleanup_frame *f);
-
-__extern_inline void
 __libc_cleanup_routine (struct __pthread_cleanup_frame *f)
 {
   if (f->__do_it)
@@ -531,6 +545,7 @@ weak_extern (__pthread_key_create)
 weak_extern (__pthread_setspecific)
 weak_extern (__pthread_getspecific)
 weak_extern (__pthread_once)
+weak_extern (__pthread_initialize)
 weak_extern (__pthread_atfork)
 #ifdef SHARED
 weak_extern (_pthread_cleanup_push_defer)
@@ -556,6 +571,7 @@ weak_extern (pthread_setcancelstate)
 #  pragma weak __pthread_setspecific
 #  pragma weak __pthread_getspecific
 #  pragma weak __pthread_once
+#  pragma weak __pthread_initialize
 #  pragma weak __pthread_atfork
 #  pragma weak _pthread_cleanup_push_defer
 #  pragma weak _pthread_cleanup_pop_restore
index 960bde1..9a524e5 100644 (file)
@@ -1,5 +1,5 @@
 /* Signal handling function for threaded programs.
-   Copyright (C) 1998, 1999, 2000, 2002 Free Software Foundation, Inc.
+   Copyright (C) 1998, 1999, 2000, 2002, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -35,4 +35,10 @@ extern int pthread_sigmask (int __how,
 /* Send signal SIGNO to the given thread. */
 extern int pthread_kill (pthread_t __threadid, int __signo) __THROW;
 
+#ifdef __USE_GNU
+/* Queue signal and data to a thread.  */
+extern int pthread_sigqueue (pthread_t __threadid, int __signo,
+                            const union sigval __value) __THROW;
+#endif
+
 #endif /* bits/sigthread.h */
index cd64bc3..b8efdd8 100644 (file)
@@ -1,5 +1,5 @@
 /* Thread package specific definitions of stream lock type.  NPTL version.
-   Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+   Copyright (C) 2000, 2001, 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
     void *__self = THREAD_SELF;                                                      \
     if ((_name).owner != __self)                                             \
       {                                                                              \
-        lll_lock ((_name).lock);                                             \
+       lll_lock ((_name).lock, LLL_PRIVATE);                                 \
         (_name).owner = __self;                                                      \
       }                                                                              \
     ++(_name).cnt;                                                           \
@@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
     if (--(_name).cnt == 0)                                                  \
       {                                                                              \
         (_name).owner = NULL;                                                \
-        lll_unlock ((_name).lock);                                           \
+       lll_unlock ((_name).lock, LLL_PRIVATE);                               \
       }                                                                              \
   } while (0)
 
@@ -94,9 +94,15 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
        __attribute__((cleanup (_IO_acquire_lock_fct)))                       \
        = (_fp);                                                              \
     _IO_flockfile (_IO_acquire_lock_file);
-
+#  define _IO_acquire_lock_clear_flags2(_fp) \
+  do {                                                                       \
+    _IO_FILE *_IO_acquire_lock_file                                          \
+       __attribute__((cleanup (_IO_acquire_lock_clear_flags2_fct)))          \
+       = (_fp);                                                              \
+    _IO_flockfile (_IO_acquire_lock_file);
 # else
 #  define _IO_acquire_lock(_fp) _IO_acquire_lock_needs_exceptions_enabled
+#  define _IO_acquire_lock_clear_flags2(_fp) _IO_acquire_lock (_fp)
 # endif
 # define _IO_release_lock(_fp) ; } while (0)
 
index 88ffe09..a676e27 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -56,11 +56,11 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
   PREPARE_CREATE;
 #endif
 
-  if (stopped)
-    /* We Make sure the thread does not run far by forcing it to get a
+  if (__builtin_expect (stopped != 0, 0))
+    /* We make sure the thread does not run far by forcing it to get a
        lock.  We lock it here too so that the new thread cannot continue
        until we tell it to.  */
-    lll_lock (pd->lock);
+    lll_lock (pd->lock, LLL_PRIVATE);
 
   /* One more thread.  We cannot have the thread do this itself, since it
      might exist but not have been scheduled yet by the time we've returned
@@ -84,7 +84,8 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
       if (IS_DETACHED (pd))
        __deallocate_stack (pd);
 
-      return errno;
+      /* We have to translate error codes.  */
+      return errno == ENOMEM ? EAGAIN : errno;
     }
 
   /* Now we have the possibility to set scheduling parameters etc.  */
@@ -97,7 +98,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
       if (attr->cpuset != NULL)
        {
          res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
-                                 sizeof (cpu_set_t), attr->cpuset);
+                                 attr->cpusetsize, attr->cpuset);
 
          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            {
@@ -223,7 +224,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
              __nptl_create_event ();
 
              /* And finally restart the new thread.  */
-             lll_unlock (pd->lock);
+             lll_unlock (pd->lock, LLL_PRIVATE);
            }
 
          return res;
@@ -242,6 +243,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
                       || (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))
     stopped = true;
   pd->stopped_start = stopped;
+  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);
 
   /* Actually create the thread.  */
   int res = do_clone (pd, attr, clone_flags, start_thread,
@@ -249,7 +251,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
 
   if (res == 0 && stopped)
     /* And finally restart the new thread.  */
-    lll_unlock (pd->lock);
+    lll_unlock (pd->lock, LLL_PRIVATE);
 
   return res;
 }
index 753a2d8..ad189e8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
-#include <setjmp.h>
-#include <signal.h>
-#include <stdlib.h>
 #include "pthreadP.h"
-#include "atomic.h"
 
 
-#ifdef IS_IN_librt
-/* The next two functions are similar to pthread_setcanceltype() but
-   more specialized for the use in the cancelable functions like write().
-   They do not need to check parameters etc.  */
-int
-attribute_hidden
-__librt_enable_asynccancel (void)
-{
-  struct pthread *self = THREAD_SELF;
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-
-  while (1)
-    {
-      int newval = oldval | CANCELTYPE_BITMASK;
-
-      if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
-       {
-         /* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
-            stop right here.  */
-         if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
-           break;
-
-         int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
-                                                 newval, oldval);
-         if (__builtin_expect (curval != oldval, 0))
-           {
-             /* Somebody else modified the word, try again.  */
-             oldval = curval;
-             continue;
-           }
-
-         THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-
-         __do_cancel ();
-
-         /* NOTREACHED */
-       }
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-                                             oldval);
-      if (__builtin_expect (curval == oldval, 1))
-       break;
-
-      /* Prepare the next round.  */
-      oldval = curval;
-    }
-
-  return oldval;
-}
-
-
-void
-internal_function attribute_hidden
-__librt_disable_asynccancel (int oldtype)
-{
-  /* If asynchronous cancellation was enabled before we do not have
-     anything to do.  */
-  if (oldtype & CANCELTYPE_BITMASK)
-    return;
-
-  struct pthread *self = THREAD_SELF;
-  int oldval = THREAD_GETMEM (self, cancelhandling);
-
-  while (1)
-    {
-      int newval = oldval & ~CANCELTYPE_BITMASK;
-
-      if (newval == oldval)
-       break;
-
-      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
-                                             oldval);
-      if (__builtin_expect (curval == oldval, 1))
-       break;
-
-      /* Prepare the next round.  */
-      oldval = curval;
-    }
-}
-
-
-#endif
+#define __pthread_enable_asynccancel __librt_enable_asynccancel
+#define __pthread_disable_asynccancel __librt_disable_asynccancel
+#include "cancellation.c"
index 43186a2..6ddccb9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -46,24 +46,13 @@ typedef struct list_head
 static inline void
 list_add (list_t *newp, list_t *head)
 {
-  head->next->prev = newp;
   newp->next = head->next;
   newp->prev = head;
+  head->next->prev = newp;
   head->next = newp;
 }
 
 
-/* Add new element at the tail of the list.  */
-static inline void
-list_add_tail (list_t *newp, list_t *head)
-{
-  head->prev->next = newp;
-  newp->next = head;
-  newp->prev = head->prev;
-  head->prev = newp;
-}
-
-
 /* Remove element from list.  */
 static inline void
 list_del (list_t *elem)
index efab230..e99aaa7 100644 (file)
@@ -1,6 +1,6 @@
 /* Basic platform-independent macro definitions for mutexes,
    thread-specific data and parameters for malloc.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -38,13 +38,24 @@ extern void *__dso_handle __attribute__ ((__weak__));
 
 #include <fork.h>
 
+#define ATFORK_MEM static struct fork_handler atfork_mem
+
 #ifdef SHARED
 # define thread_atfork(prepare, parent, child) \
-   __register_atfork (prepare, parent, child, __dso_handle)
+  atfork_mem.prepare_handler = prepare;                                              \
+  atfork_mem.parent_handler = parent;                                        \
+  atfork_mem.child_handler = child;                                          \
+  atfork_mem.dso_handle = __dso_handle;                                              \
+  atfork_mem.refcntr = 1;                                                    \
+  __linkin_atfork (&atfork_mem)
 #else
 # define thread_atfork(prepare, parent, child) \
-   __register_atfork (prepare, parent, child,                                \
-                     &__dso_handle == NULL ? NULL : __dso_handle)
+  atfork_mem.prepare_handler = prepare;                                              \
+  atfork_mem.parent_handler = parent;                                        \
+  atfork_mem.child_handler = child;                                          \
+  atfork_mem.dso_handle = &__dso_handle == NULL ? NULL : __dso_handle;       \
+  atfork_mem.refcntr = 1;                                                    \
+  __linkin_atfork (&atfork_mem)
 #endif
 
 /* thread specific data for glibc */
@@ -52,10 +63,10 @@ extern void *__dso_handle __attribute__ ((__weak__));
 #include <bits/libc-tsd.h>
 
 typedef int tsd_key_t[1];      /* no key data structure, libc magic does it */
-__libc_tsd_define (static, MALLOC)     /* declaration/common definition */
+__libc_tsd_define (static, void *, MALLOC)     /* declaration/common definition */
 #define tsd_key_create(key, destr)     ((void) (key))
-#define tsd_setspecific(key, data)     __libc_tsd_set (MALLOC, (data))
-#define tsd_getspecific(key, vptr)     ((vptr) = __libc_tsd_get (MALLOC))
+#define tsd_setspecific(key, data)     __libc_tsd_set (void *, MALLOC, (data))
+#define tsd_getspecific(key, vptr)     ((vptr) = __libc_tsd_get (void *, MALLOC))
 
 #include <sysdeps/generic/malloc-machine.h>
 
index 5955a7e..b26a504 100644 (file)
@@ -72,7 +72,7 @@ call_initialize_minimal (void)
 }
 
 SECTION (".init");
-extern void _init (void);
+extern void __attribute__ ((section (".init"))) _init (void);
 void
 _init (void)
 {
@@ -93,7 +93,7 @@ asm ("\n/*@_init_EPILOG_ENDS*/");
 asm ("\n/*@_fini_PROLOG_BEGINS*/");
 
 SECTION (".fini");
-extern void _fini (void);
+extern void __attribute__ ((section (".fini"))) _fini (void);
 void
 _fini (void)
 {
index b4106fd..f161380 100644 (file)
@@ -21,8 +21,6 @@
 #include <stdlib.h>
 #include "pthreadP.h"
 
-extern void __libc_longjmp (sigjmp_buf env, int val)
-       __attribute__ ((noreturn));
 void
 longjmp (jmp_buf env, int val)
 {
index 813d556..0c404fc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -23,6 +23,7 @@
 #include <pthread.h>
 #include <setjmp.h>
 #include <internaltypes.h>
+#include <sysdep.h>
 
 struct xid_command;
 
@@ -72,12 +73,8 @@ struct pthread_functions
   int (*ptr_pthread_mutex_destroy) (pthread_mutex_t *);
   int (*ptr_pthread_mutex_init) (pthread_mutex_t *,
                                 const pthread_mutexattr_t *);
-  int (*ptr_pthread_mutex_trylock) (pthread_mutex_t *);
   int (*ptr_pthread_mutex_lock) (pthread_mutex_t *);
   int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *);
-  int (*ptr_pthread_mutexattr_init) (pthread_mutexattr_t *attr);
-  int (*ptr_pthread_mutexattr_destroy) (pthread_mutexattr_t *attr);
-  int (*ptr_pthread_mutexattr_settype) (pthread_mutexattr_t *attr, int kind);
   pthread_t (*ptr_pthread_self) (void);
   int (*ptr_pthread_setcancelstate) (int, int *);
   int (*ptr_pthread_setcanceltype) (int, int *);
@@ -99,9 +96,22 @@ struct pthread_functions
        __attribute ((noreturn)) __cleanup_fct_attribute;
   void (*ptr__nptl_deallocate_tsd) (void);
   int (*ptr__nptl_setxid) (struct xid_command *);
+  void (*ptr_freeres) (void);
 };
 
 /* Variable in libc.so.  */
 extern struct pthread_functions __libc_pthread_functions attribute_hidden;
+extern int __libc_pthread_functions_init attribute_hidden;
+
+#ifdef PTR_DEMANGLE
+# define PTHFCT_CALL(fct, params) \
+  ({ __typeof (__libc_pthread_functions.fct) __p;                            \
+     __p = __libc_pthread_functions.fct;                                     \
+     PTR_DEMANGLE (__p);                                                     \
+     __p params; })
+#else
+# define PTHFCT_CALL(fct, params) \
+  __libc_pthread_functions.fct params
+#endif
 
 #endif /* pthread-functions.h */
index c4bfc0e..deb7430 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +21,7 @@
 #define _PTHREAD_H     1
 
 #include <features.h>
+#include <endian.h>
 #include <sched.h>
 #include <time.h>
 
@@ -50,7 +52,7 @@ enum
   PTHREAD_MUTEX_RECURSIVE_NP,
   PTHREAD_MUTEX_ERRORCHECK_NP,
   PTHREAD_MUTEX_ADAPTIVE_NP
-#ifdef __USE_UNIX98
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
   ,
   PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
   PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
@@ -63,6 +65,30 @@ enum
 #endif
 };
 
+
+#ifdef __USE_XOPEN2K
+/* Robust mutex or not flags.  */
+enum
+{
+  PTHREAD_MUTEX_STALLED,
+  PTHREAD_MUTEX_STALLED_NP = PTHREAD_MUTEX_STALLED,
+  PTHREAD_MUTEX_ROBUST,
+  PTHREAD_MUTEX_ROBUST_NP = PTHREAD_MUTEX_ROBUST
+};
+#endif
+
+
+#ifdef __USE_UNIX98
+/* Mutex protocols.  */
+enum
+{
+  PTHREAD_PRIO_NONE,
+  PTHREAD_PRIO_INHERIT,
+  PTHREAD_PRIO_PROTECT
+};
+#endif
+
+
 /* Mutex initializers.  */
 #if __WORDSIZE == 64
 # define PTHREAD_MUTEX_INITIALIZER \
@@ -88,6 +114,7 @@ enum
 # endif
 #endif
 
+
 /* Read-write lock types.  */
 #if defined __USE_UNIX98 || defined __USE_XOPEN2K
 enum
@@ -99,21 +126,23 @@ enum
 };
 
 /* Read-write lock initializers.  */
-# if __WORDSIZE == 64
-#  define PTHREAD_RWLOCK_INITIALIZER \
+# define PTHREAD_RWLOCK_INITIALIZER \
   { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
-# else
-#  define PTHREAD_RWLOCK_INITIALIZER \
-  { { 0, 0, 0, 0, 0, 0, 0, 0 } }
-# endif
 # ifdef __USE_GNU
 #  if __WORDSIZE == 64
 #   define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
   { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                          \
-      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
+       PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
 #  else
-#   define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
-  { { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, 0 } }
+#   if __BYTE_ORDER == __LITTLE_ENDIAN
+#    define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+  { { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, \
+      0, 0, 0, 0 } }
+#   else
+#    define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+  { { 0, 0, 0, 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,\
+      0 } }
+#   endif
 #  endif
 # endif
 #endif  /* Unix98 or XOpen2K */
@@ -201,7 +230,7 @@ __BEGIN_DECLS
 extern int pthread_create (pthread_t *__restrict __newthread,
                           __const pthread_attr_t *__restrict __attr,
                           void *(*__start_routine) (void *),
-                          void *__restrict __arg) __THROW;
+                          void *__restrict __arg) __THROW __nonnull ((1, 3));
 
 /* Terminate calling thread.
 
@@ -251,71 +280,78 @@ extern int pthread_equal (pthread_t __thread1, pthread_t __thread2) __THROW;
 /* Initialize thread attribute *ATTR with default attributes
    (detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER,
     no user-provided stack).  */
-extern int pthread_attr_init (pthread_attr_t *__attr) __THROW;
+extern int pthread_attr_init (pthread_attr_t *__attr) __THROW __nonnull ((1));
 
 /* Destroy thread attribute *ATTR.  */
-extern int pthread_attr_destroy (pthread_attr_t *__attr) __THROW;
+extern int pthread_attr_destroy (pthread_attr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get detach state attribute.  */
 extern int pthread_attr_getdetachstate (__const pthread_attr_t *__attr,
-                                       int *__detachstate) __THROW;
+                                       int *__detachstate)
+     __THROW __nonnull ((1, 2));
 
 /* Set detach state attribute.  */
 extern int pthread_attr_setdetachstate (pthread_attr_t *__attr,
-                                       int __detachstate) __THROW;
+                                       int __detachstate)
+     __THROW __nonnull ((1));
 
 
 /* Get the size of the guard area created for stack overflow protection.  */
 extern int pthread_attr_getguardsize (__const pthread_attr_t *__attr,
-                                     size_t *__guardsize) __THROW;
+                                     size_t *__guardsize)
+     __THROW __nonnull ((1, 2));
 
 /* Set the size of the guard area created for stack overflow protection.  */
 extern int pthread_attr_setguardsize (pthread_attr_t *__attr,
-                                     size_t __guardsize) __THROW;
+                                     size_t __guardsize)
+     __THROW __nonnull ((1));
 
 
 /* Return in *PARAM the scheduling parameters of *ATTR.  */
 extern int pthread_attr_getschedparam (__const pthread_attr_t *__restrict
                                       __attr,
                                       struct sched_param *__restrict __param)
-     __THROW;
+     __THROW __nonnull ((1, 2));
 
 /* Set scheduling parameters (priority, etc) in *ATTR according to PARAM.  */
 extern int pthread_attr_setschedparam (pthread_attr_t *__restrict __attr,
                                       __const struct sched_param *__restrict
-                                      __param) __THROW;
+                                      __param) __THROW __nonnull ((1, 2));
 
 /* Return in *POLICY the scheduling policy of *ATTR.  */
 extern int pthread_attr_getschedpolicy (__const pthread_attr_t *__restrict
                                        __attr, int *__restrict __policy)
-     __THROW;
+     __THROW __nonnull ((1, 2));
 
 /* Set scheduling policy in *ATTR according to POLICY.  */
 extern int pthread_attr_setschedpolicy (pthread_attr_t *__attr, int __policy)
-     __THROW;
+     __THROW __nonnull ((1));
 
 /* Return in *INHERIT the scheduling inheritance mode of *ATTR.  */
 extern int pthread_attr_getinheritsched (__const pthread_attr_t *__restrict
                                         __attr, int *__restrict __inherit)
-     __THROW;
+     __THROW __nonnull ((1, 2));
 
 /* Set scheduling inheritance mode in *ATTR according to INHERIT.  */
 extern int pthread_attr_setinheritsched (pthread_attr_t *__attr,
-                                        int __inherit) __THROW;
+                                        int __inherit)
+     __THROW __nonnull ((1));
 
 
 /* Return in *SCOPE the scheduling contention scope of *ATTR.  */
 extern int pthread_attr_getscope (__const pthread_attr_t *__restrict __attr,
-                                 int *__restrict __scope) __THROW;
+                                 int *__restrict __scope)
+     __THROW __nonnull ((1, 2));
 
 /* Set scheduling contention scope in *ATTR according to SCOPE.  */
 extern int pthread_attr_setscope (pthread_attr_t *__attr, int __scope)
-     __THROW;
+     __THROW __nonnull ((1));
 
 /* Return the previously set address for the stack.  */
 extern int pthread_attr_getstackaddr (__const pthread_attr_t *__restrict
                                      __attr, void **__restrict __stackaddr)
-     __THROW __attribute_deprecated__;
+     __THROW __nonnull ((1, 2)) __attribute_deprecated__;
 
 /* Set the starting address of the stack of the thread to be created.
    Depending on whether the stack grows up or down the value must either
@@ -323,30 +359,32 @@ extern int pthread_attr_getstackaddr (__const pthread_attr_t *__restrict
    minimal size of the block must be PTHREAD_STACK_MIN.  */
 extern int pthread_attr_setstackaddr (pthread_attr_t *__attr,
                                      void *__stackaddr)
-     __THROW __attribute_deprecated__;
+     __THROW __nonnull ((1)) __attribute_deprecated__;
 
 /* Return the currently used minimal stack size.  */
 extern int pthread_attr_getstacksize (__const pthread_attr_t *__restrict
                                      __attr, size_t *__restrict __stacksize)
-     __THROW;
+     __THROW __nonnull ((1, 2));
 
 /* Add information about the minimum stack size needed for the thread
    to be started.  This size must never be less than PTHREAD_STACK_MIN
    and must also not exceed the system limits.  */
 extern int pthread_attr_setstacksize (pthread_attr_t *__attr,
-                                     size_t __stacksize) __THROW;
+                                     size_t __stacksize)
+     __THROW __nonnull ((1));
 
 #ifdef __USE_XOPEN2K
 /* Return the previously set address for the stack.  */
 extern int pthread_attr_getstack (__const pthread_attr_t *__restrict __attr,
                                  void **__restrict __stackaddr,
-                                 size_t *__restrict __stacksize) __THROW;
+                                 size_t *__restrict __stacksize)
+     __THROW __nonnull ((1, 2, 3));
 
 /* The following two interfaces are intended to replace the last two.  They
    require setting the address as well as the size since only setting the
    address will make the implementation on some architectures impossible.  */
 extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
-                                 size_t __stacksize) __THROW;
+                                 size_t __stacksize) __THROW __nonnull ((1));
 #endif
 
 #ifdef __USE_GNU
@@ -354,19 +392,22 @@ extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
    the processors represented in CPUSET.  */
 extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr,
                                        size_t __cpusetsize,
-                                       __const cpu_set_t *__cpuset) __THROW;
+                                       __const cpu_set_t *__cpuset)
+     __THROW __nonnull ((1, 3));
 
 /* Get bit set in CPUSET representing the processors threads created with
    ATTR can run on.  */
 extern int pthread_attr_getaffinity_np (__const pthread_attr_t *__attr,
                                        size_t __cpusetsize,
-                                       cpu_set_t *__cpuset) __THROW;
+                                       cpu_set_t *__cpuset)
+     __THROW __nonnull ((1, 3));
 
 
 /* Initialize thread attribute *ATTR with attributes corresponding to the
-   already running thread TH.  It shall be called on unitialized ATTR
+   already running thread TH.  It shall be called on uninitialized ATTR
    and destroyed with pthread_attr_destroy when no longer needed.  */
-extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) __THROW;
+extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr)
+     __THROW __nonnull ((2));
 #endif
 
 
@@ -376,13 +417,13 @@ extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) __THROW;
    and *PARAM.  */
 extern int pthread_setschedparam (pthread_t __target_thread, int __policy,
                                  __const struct sched_param *__param)
-     __THROW;
+     __THROW __nonnull ((3));
 
 /* Return in *POLICY and *PARAM the scheduling parameters for TARGET_THREAD. */
 extern int pthread_getschedparam (pthread_t __target_thread,
                                  int *__restrict __policy,
                                  struct sched_param *__restrict __param)
-     __THROW;
+     __THROW __nonnull ((2, 3));
 
 /* Set the scheduling priority for TARGET_THREAD.  */
 extern int pthread_setschedprio (pthread_t __target_thread, int __prio)
@@ -408,11 +449,13 @@ extern int pthread_yield (void) __THROW;
 /* Limit specified thread TH to run only on the processors represented
    in CPUSET.  */
 extern int pthread_setaffinity_np (pthread_t __th, size_t __cpusetsize,
-                                  __const cpu_set_t *__cpuset) __THROW;
+                                  __const cpu_set_t *__cpuset)
+     __THROW __nonnull ((3));
 
 /* Get bit set in CPUSET representing the processors TH can run on.  */
 extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
-                                  cpu_set_t *__cpuset) __THROW;
+                                  cpu_set_t *__cpuset)
+     __THROW __nonnull ((3));
 #endif
 
 
@@ -426,7 +469,7 @@ extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
    The initialization functions might throw exception which is why
    this function is not marked with __THROW.  */
 extern int pthread_once (pthread_once_t *__once_control,
-                        void (*__init_routine) (void));
+                        void (*__init_routine) (void)) __nonnull ((1, 2));
 
 
 /* Functions for handling cancellation.
@@ -539,9 +582,6 @@ class __pthread_cleanup_class
    needed or fall back on the copy which must exist somewhere
    else.  */
 __extern_inline void
-__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame);
-
-__extern_inline void
 __pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
 {
   if (__frame->__do_it)
@@ -603,7 +643,7 @@ __pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
     __pthread_unwind_buf_t __cancel_buf;                                     \
     void (*__cancel_routine) (void *) = (routine);                           \
     void *__cancel_arg = (arg);                                                      \
-    int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *)               \
+    int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *)              \
                                      __cancel_buf.__cancel_jmp_buf, 0);      \
     if (__builtin_expect (not_first_call, 0))                                \
       {                                                                              \
@@ -620,6 +660,7 @@ extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
 /* Remove a cleanup handler installed by the matching pthread_cleanup_push.
    If EXECUTE is non-zero, the handler function is called. */
 # define pthread_cleanup_pop(execute) \
+      do { } while (0);/* Empty to allow label before pthread_cleanup_pop.  */\
     } while (0);                                                             \
     __pthread_unregister_cancel (&__cancel_buf);                             \
     if (execute)                                                             \
@@ -637,7 +678,7 @@ extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
     __pthread_unwind_buf_t __cancel_buf;                                     \
     void (*__cancel_routine) (void *) = (routine);                           \
     void *__cancel_arg = (arg);                                                      \
-    int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *)               \
+    int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *)              \
                                      __cancel_buf.__cancel_jmp_buf, 0);      \
     if (__builtin_expect (not_first_call, 0))                                \
       {                                                                              \
@@ -655,6 +696,7 @@ extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
    restores the cancellation type that was in effect when the matching
    pthread_cleanup_push_defer was called.  */
 #  define pthread_cleanup_pop_restore_np(execute) \
+      do { } while (0);/* Empty to allow label before pthread_cleanup_pop.  */\
     } while (0);                                                             \
     __pthread_unregister_cancel_restore (&__cancel_buf);                     \
     if (execute)                                                             \
@@ -666,9 +708,9 @@ extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf)
 
 /* Internal interface to initiate cleanup.  */
 extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
-     __cleanup_fct_attribute __attribute ((__noreturn__))
+     __cleanup_fct_attribute __attribute__ ((__noreturn__))
 # ifndef SHARED
-     __attribute ((__weak__))
+     __attribute__ ((__weak__))
 # endif
      ;
 #endif
@@ -683,56 +725,135 @@ extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __THROW;
 /* Initialize a mutex.  */
 extern int pthread_mutex_init (pthread_mutex_t *__mutex,
                               __const pthread_mutexattr_t *__mutexattr)
-     __THROW;
+     __THROW __nonnull ((1));
 
 /* Destroy a mutex.  */
-extern int pthread_mutex_destroy (pthread_mutex_t *__mutex) __THROW;
+extern int pthread_mutex_destroy (pthread_mutex_t *__mutex)
+     __THROW __nonnull ((1));
 
 /* Try locking a mutex.  */
-extern int pthread_mutex_trylock (pthread_mutex_t *_mutex) __THROW;
+extern int pthread_mutex_trylock (pthread_mutex_t *__mutex)
+     __THROW __nonnull ((1));
 
 /* Lock a mutex.  */
-extern int pthread_mutex_lock (pthread_mutex_t *__mutex) __THROW;
+extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
+     __THROW __nonnull ((1));
 
 #ifdef __USE_XOPEN2K
 /* Wait until lock becomes available, or specified time passes. */
 extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
-                                    __const struct timespec *__restrict
-                                    __abstime) __THROW;
+                                   __const struct timespec *__restrict
+                                   __abstime) __THROW __nonnull ((1, 2));
 #endif
 
 /* Unlock a mutex.  */
-extern int pthread_mutex_unlock (pthread_mutex_t *__mutex) __THROW;
+extern int pthread_mutex_unlock (pthread_mutex_t *__mutex)
+     __THROW __nonnull ((1));
+
+
+/* Get the priority ceiling of MUTEX.  */
+extern int pthread_mutex_getprioceiling (__const pthread_mutex_t *
+                                        __restrict __mutex,
+                                        int *__restrict __prioceiling)
+     __THROW __nonnull ((1, 2));
+
+/* Set the priority ceiling of MUTEX to PRIOCEILING, return old
+   priority ceiling value in *OLD_CEILING.  */
+extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex,
+                                        int __prioceiling,
+                                        int *__restrict __old_ceiling)
+     __THROW __nonnull ((1, 3));
+
+
+#ifdef __USE_XOPEN2K8
+/* Declare the state protected by MUTEX as consistent.  */
+extern int pthread_mutex_consistent (pthread_mutex_t *__mutex)
+     __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutex_consistent_np (pthread_mutex_t *__mutex)
+     __THROW __nonnull ((1));
+# endif
+#endif
 
 
 /* Functions for handling mutex attributes.  */
 
 /* Initialize mutex attribute object ATTR with default attributes
    (kind is PTHREAD_MUTEX_TIMED_NP).  */
-extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr) __THROW;
+extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy mutex attribute object ATTR.  */
-extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr) __THROW;
+extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get the process-shared flag of the mutex attribute ATTR.  */
 extern int pthread_mutexattr_getpshared (__const pthread_mutexattr_t *
                                         __restrict __attr,
-                                        int *__restrict __pshared) __THROW;
+                                        int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set the process-shared flag of the mutex attribute ATTR.  */
 extern int pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr,
-                                        int __pshared) __THROW;
+                                        int __pshared)
+     __THROW __nonnull ((1));
 
-#ifdef __USE_UNIX98
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
 /* Return in *KIND the mutex kind attribute in *ATTR.  */
 extern int pthread_mutexattr_gettype (__const pthread_mutexattr_t *__restrict
-                                     __attr, int *__restrict __kind) __THROW;
+                                     __attr, int *__restrict __kind)
+     __THROW __nonnull ((1, 2));
 
 /* Set the mutex kind attribute in *ATTR to KIND (either PTHREAD_MUTEX_NORMAL,
    PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK, or
    PTHREAD_MUTEX_DEFAULT).  */
 extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
-     __THROW;
+     __THROW __nonnull ((1));
+#endif
+
+/* Return in *PROTOCOL the mutex protocol attribute in *ATTR.  */
+extern int pthread_mutexattr_getprotocol (__const pthread_mutexattr_t *
+                                         __restrict __attr,
+                                         int *__restrict __protocol)
+     __THROW __nonnull ((1, 2));
+
+/* Set the mutex protocol attribute in *ATTR to PROTOCOL (either
+   PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT, or PTHREAD_PRIO_PROTECT).  */
+extern int pthread_mutexattr_setprotocol (pthread_mutexattr_t *__attr,
+                                         int __protocol)
+     __THROW __nonnull ((1));
+
+/* Return in *PRIOCEILING the mutex prioceiling attribute in *ATTR.  */
+extern int pthread_mutexattr_getprioceiling (__const pthread_mutexattr_t *
+                                            __restrict __attr,
+                                            int *__restrict __prioceiling)
+     __THROW __nonnull ((1, 2));
+
+/* Set the mutex prioceiling attribute in *ATTR to PRIOCEILING.  */
+extern int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *__attr,
+                                            int __prioceiling)
+     __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the robustness flag of the mutex attribute ATTR.  */
+extern int pthread_mutexattr_getrobust (__const pthread_mutexattr_t *__attr,
+                                       int *__robustness)
+     __THROW __nonnull ((1, 2));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_getrobust_np (__const pthread_mutexattr_t *__attr,
+                                          int *__robustness)
+     __THROW __nonnull ((1, 2));
+# endif
+
+/* Set the robustness flag of the mutex attribute ATTR.  */
+extern int pthread_mutexattr_setrobust (pthread_mutexattr_t *__attr,
+                                       int __robustness)
+     __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *__attr,
+                                          int __robustness)
+     __THROW __nonnull ((1));
+# endif
 #endif
 
 
@@ -743,66 +864,77 @@ extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
    the default values if later is NULL.  */
 extern int pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
                                __const pthread_rwlockattr_t *__restrict
-                               __attr) __THROW;
+                               __attr) __THROW __nonnull ((1));
 
 /* Destroy read-write lock RWLOCK.  */
-extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 /* Acquire read lock for RWLOCK.  */
-extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 /* Try to acquire read lock for RWLOCK.  */
-extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+  __THROW __nonnull ((1));
 
 # ifdef __USE_XOPEN2K
 /* Try to acquire read lock for RWLOCK or return after specfied time.  */
 extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
                                       __const struct timespec *__restrict
-                                      __abstime) __THROW;
+                                      __abstime) __THROW __nonnull ((1, 2));
 # endif
 
 /* Acquire write lock for RWLOCK.  */
-extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 /* Try to acquire write lock for RWLOCK.  */
-extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 # ifdef __USE_XOPEN2K
 /* Try to acquire write lock for RWLOCK or return after specfied time.  */
 extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
                                       __const struct timespec *__restrict
-                                      __abstime) __THROW;
+                                      __abstime) __THROW __nonnull ((1, 2));
 # endif
 
 /* Unlock RWLOCK.  */
-extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 
 /* Functions for handling read-write lock attributes.  */
 
 /* Initialize attribute object ATTR with default values.  */
-extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr) __THROW;
+extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy attribute object ATTR.  */
-extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr) __THROW;
+extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Return current setting of process-shared attribute of ATTR in PSHARED.  */
 extern int pthread_rwlockattr_getpshared (__const pthread_rwlockattr_t *
                                          __restrict __attr,
-                                         int *__restrict __pshared) __THROW;
+                                         int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set process-shared attribute of ATTR to PSHARED.  */
 extern int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *__attr,
-                                         int __pshared) __THROW;
+                                         int __pshared)
+     __THROW __nonnull ((1));
 
 /* Return current setting of reader/writer preference.  */
 extern int pthread_rwlockattr_getkind_np (__const pthread_rwlockattr_t *
                                          __restrict __attr,
-                                         int *__restrict __pref) __THROW;
+                                         int *__restrict __pref)
+     __THROW __nonnull ((1, 2));
 
 /* Set reader/write preference.  */
 extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
-                                         int __pref) __THROW;
+                                         int __pref) __THROW __nonnull ((1));
 #endif
 
 
@@ -812,16 +944,19 @@ extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
    the default values if later is NULL.  */
 extern int pthread_cond_init (pthread_cond_t *__restrict __cond,
                              __const pthread_condattr_t *__restrict
-                             __cond_attr) __THROW;
+                             __cond_attr) __THROW __nonnull ((1));
 
 /* Destroy condition variable COND.  */
-extern int pthread_cond_destroy (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_destroy (pthread_cond_t *__cond)
+     __THROW __nonnull ((1));
 
 /* Wake up one thread waiting for condition variable COND.  */
-extern int pthread_cond_signal (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_signal (pthread_cond_t *__cond)
+     __THROW __nonnull ((1));
 
 /* Wake up all threads waiting for condition variables COND.  */
-extern int pthread_cond_broadcast (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_broadcast (pthread_cond_t *__cond)
+     __THROW __nonnull ((1));
 
 /* Wait for condition variable COND to be signaled or broadcast.
    MUTEX is assumed to be locked before.
@@ -829,7 +964,8 @@ extern int pthread_cond_broadcast (pthread_cond_t *__cond) __THROW;
    This function is a cancellation point and therefore not marked with
    __THROW.  */
 extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
-                             pthread_mutex_t *__restrict __mutex);
+                             pthread_mutex_t *__restrict __mutex)
+     __nonnull ((1, 2));
 
 /* Wait for condition variable COND to be signaled or broadcast until
    ABSTIME.  MUTEX is assumed to be locked before.  ABSTIME is an
@@ -841,36 +977,39 @@ extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
 extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
                                   pthread_mutex_t *__restrict __mutex,
                                   __const struct timespec *__restrict
-                                  __abstime);
+                                  __abstime) __nonnull ((1, 2, 3));
 
 /* Functions for handling condition variable attributes.  */
 
 /* Initialize condition variable attribute ATTR.  */
-extern int pthread_condattr_init (pthread_condattr_t *__attr) __THROW;
+extern int pthread_condattr_init (pthread_condattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy condition variable attribute ATTR.  */
-extern int pthread_condattr_destroy (pthread_condattr_t *__attr) __THROW;
+extern int pthread_condattr_destroy (pthread_condattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get the process-shared flag of the condition variable attribute ATTR.  */
 extern int pthread_condattr_getpshared (__const pthread_condattr_t *
-                                        __restrict __attr,
-                                        int *__restrict __pshared) __THROW;
+                                       __restrict __attr,
+                                       int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set the process-shared flag of the condition variable attribute ATTR.  */
 extern int pthread_condattr_setpshared (pthread_condattr_t *__attr,
-                                        int __pshared) __THROW;
+                                       int __pshared) __THROW __nonnull ((1));
 
 #ifdef __USE_XOPEN2K
 /* Get the clock selected for the conditon variable attribute ATTR.  */
 extern int pthread_condattr_getclock (__const pthread_condattr_t *
                                      __restrict __attr,
                                      __clockid_t *__restrict __clock_id)
-     __THROW;
+     __THROW __nonnull ((1, 2));
 
 /* Set the clock selected for the conditon variable attribute ATTR.  */
 extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
-                                     __clockid_t __clock_id) __THROW;
-
+                                     __clockid_t __clock_id)
+     __THROW __nonnull ((1));
 #endif
 
 
@@ -880,19 +1019,23 @@ extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
 /* Initialize the spinlock LOCK.  If PSHARED is nonzero the spinlock can
    be shared between different processes.  */
 extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared)
-     __THROW;
+     __THROW __nonnull ((1));
 
 /* Destroy the spinlock LOCK.  */
-extern int pthread_spin_destroy (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_destroy (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 /* Wait until spinlock LOCK is retrieved.  */
-extern int pthread_spin_lock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_lock (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 /* Try to lock spinlock LOCK.  */
-extern int pthread_spin_trylock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_trylock (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 /* Release spinlock LOCK.  */
-extern int pthread_spin_unlock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_unlock (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 
 /* Functions to handle barriers.  */
@@ -901,29 +1044,36 @@ extern int pthread_spin_unlock (pthread_spinlock_t *__lock) __THROW;
    opened when COUNT waiters arrived.  */
 extern int pthread_barrier_init (pthread_barrier_t *__restrict __barrier,
                                 __const pthread_barrierattr_t *__restrict
-                                __attr, unsigned int __count) __THROW;
+                                __attr, unsigned int __count)
+     __THROW __nonnull ((1));
 
 /* Destroy a previously dynamically initialized barrier BARRIER.  */
-extern int pthread_barrier_destroy (pthread_barrier_t *__barrier) __THROW;
+extern int pthread_barrier_destroy (pthread_barrier_t *__barrier)
+     __THROW __nonnull ((1));
 
 /* Wait on barrier BARRIER.  */
-extern int pthread_barrier_wait (pthread_barrier_t *__barrier) __THROW;
+extern int pthread_barrier_wait (pthread_barrier_t *__barrier)
+     __THROW __nonnull ((1));
 
 
 /* Initialize barrier attribute ATTR.  */
-extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr) __THROW;
+extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy previously dynamically initialized barrier attribute ATTR.  */
-extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr) __THROW;
+extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get the process-shared flag of the barrier attribute ATTR.  */
 extern int pthread_barrierattr_getpshared (__const pthread_barrierattr_t *
                                           __restrict __attr,
-                                          int *__restrict __pshared) __THROW;
+                                          int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set the process-shared flag of the barrier attribute ATTR.  */
 extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
-                                           int __pshared) __THROW;
+                                          int __pshared)
+     __THROW __nonnull ((1));
 #endif
 
 
@@ -936,7 +1086,8 @@ extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
    DESTR_FUNCTION is not called if the value associated is NULL when
    the key is destroyed.  */
 extern int pthread_key_create (pthread_key_t *__key,
-                              void (*__destr_function) (void *)) __THROW;
+                              void (*__destr_function) (void *))
+     __THROW __nonnull ((1));
 
 /* Destroy KEY.  */
 extern int pthread_key_delete (pthread_key_t __key) __THROW;
@@ -946,13 +1097,14 @@ extern void *pthread_getspecific (pthread_key_t __key) __THROW;
 
 /* Store POINTER in the thread-specific data slot identified by KEY. */
 extern int pthread_setspecific (pthread_key_t __key,
-                               __const void *__pointer) __THROW;
+                               __const void *__pointer) __THROW ;
 
 
 #ifdef __USE_XOPEN2K
 /* Get ID of CPU-time clock for thread THREAD_ID.  */
 extern int pthread_getcpuclockid (pthread_t __thread_id,
-                                 __clockid_t *__clock_id) __THROW;
+                                 __clockid_t *__clock_id)
+     __THROW __nonnull ((2));
 #endif
 
 
@@ -971,6 +1123,16 @@ extern int pthread_atfork (void (*__prepare) (void),
                           void (*__parent) (void),
                           void (*__child) (void)) __THROW;
 
+
+#ifdef __USE_EXTERN_INLINES
+/* Optimizations.  */
+__extern_inline int
+__NTH (pthread_equal (pthread_t __thread1, pthread_t __thread2))
+{
+  return __thread1 == __thread2;
+}
+#endif
+
 __END_DECLS
 
 #endif /* pthread.h */
index d21ed79..d113539 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
 
 /* Wait on barrier.  */
 int
-pthread_barrier_wait (pthread_barrier_t *barrier)
+pthread_barrier_wait (
+     pthread_barrier_t *barrier)
 {
   struct pthread_barrier *ibarrier = (struct pthread_barrier *) barrier;
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_lock (ibarrier->lock);
+  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   /* One more arrival.  */
   --ibarrier->left;
@@ -44,7 +45,8 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
       ++ibarrier->curr_event;
 
       /* Wake up everybody.  */
-      lll_futex_wake (&ibarrier->curr_event, INT_MAX);
+      lll_futex_wake (&ibarrier->curr_event, INT_MAX,
+                     ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
       /* This is the thread which finished the serialization.  */
       result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -56,11 +58,12 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
       unsigned int event = ibarrier->curr_event;
 
       /* Before suspending, make the barrier available to others.  */
-      lll_unlock (ibarrier->lock);
+      lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Wait for the event counter of the barrier to change.  */
       do
-       lll_futex_wait (&ibarrier->curr_event, event);
+       lll_futex_wait (&ibarrier->curr_event, event,
+                       ibarrier->private ^ FUTEX_PRIVATE_FLAG);
       while (event == ibarrier->curr_event);
     }
 
@@ -70,7 +73,7 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
   /* If this was the last woken thread, unlock.  */
   if (atomic_increment_val (&ibarrier->left) == init_count)
     /* We are done.  */
-    lll_unlock (ibarrier->lock);
+    lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   return result;
 }
index f6e83ed..5e74657 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
 #include <lowlevellock.h>
 #include <pthread.h>
 #include <pthreadP.h>
+
 #include <bits/kernel-features.h>
 
 
 int
-__pthread_cond_broadcast (pthread_cond_t *cond)
+__pthread_cond_broadcast (
+     pthread_cond_t *cond)
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+               ? LLL_SHARED : LLL_PRIVATE;
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -44,7 +48,7 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
       ++cond->__data.__broadcast_seq;
 
       /* We are done.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Do not use requeue for pshared condvars.  */
       if (cond->__data.__mutex == (void *) ~0l)
@@ -52,15 +56,24 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
 
       /* Wake everybody.  */
       pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+
+      /* XXX: Kernel so far doesn't support requeue to PI futex.  */
+      /* XXX: Kernel so far can only requeue to the same type of futex,
+        in this case private (we don't requeue for pshared condvars).  */
+      if (__builtin_expect (mut->__data.__kind
+                           & (PTHREAD_MUTEX_PRIO_INHERIT_NP
+                              | PTHREAD_MUTEX_PSHARED_BIT), 0))
+       goto wake_all;
+
       /* lll_futex_requeue returns 0 for success and non-zero
         for errors.  */
       if (__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1,
                                               INT_MAX, &mut->__data.__lock,
-                                              futex_val), 0))
+                                              futex_val, LLL_PRIVATE), 0))
        {
          /* The requeue functionality is not available.  */
        wake_all:
-         lll_futex_wake (&cond->__data.__futex, INT_MAX);
+         lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared);
        }
 
       /* That's all.  */
@@ -68,8 +81,9 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   return 0;
 }
+
 weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
index 5091bea..d66f3ed 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
 #include <lowlevellock.h>
 #include <pthread.h>
 #include <pthreadP.h>
+
 #include <bits/kernel-features.h>
 
 
 int
-__pthread_cond_signal (pthread_cond_t *cond)
+__pthread_cond_signal (
+     pthread_cond_t *cond)
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+               ? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -40,12 +45,18 @@ __pthread_cond_signal (pthread_cond_t *cond)
       ++cond->__data.__futex;
 
       /* Wake one.  */
-      lll_futex_wake (&cond->__data.__futex, 1);
+      if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, 1,
+                                                    1, &cond->__data.__lock,
+                                                    pshared), 0))
+       return 0;
+
+      lll_futex_wake (&cond->__data.__futex, 1, pshared);
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   return 0;
 }
+
 weak_alias(__pthread_cond_signal, pthread_cond_signal)
index 1f4136e..4aaf5df 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -23,6 +23,7 @@
 #include <lowlevellock.h>
 #include <pthread.h>
 #include <pthreadP.h>
+#include <bits/kernel-features.h>
 
 
 /* Cleanup handler, defined in pthread_cond_wait.c.  */
@@ -51,21 +52,24 @@ __pthread_cond_timedwait (
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     return EINVAL;
 
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+               ? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are along.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Now we can release the mutex.  */
   int err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (err)
     {
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
       return err;
     }
 
   /* We have one new user of the condvar.  */
   ++cond->__data.__total_seq;
   ++cond->__data.__futex;
-  cond->__data.__nwaiters += 1 << COND_CLOCK_BITS;
+  cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
 
   /* Remember the mutex we are using here.  If there is already a
      different address store this is a bad user bug.  Do not store
@@ -98,7 +102,7 @@ __pthread_cond_timedwait (
        int ret;
        ret = INTERNAL_SYSCALL (clock_gettime, err, 2,
                                (cond->__data.__nwaiters
-                                & ((1 << COND_CLOCK_BITS) - 1)),
+                                & ((1 << COND_NWAITERS_SHIFT) - 1)),
                                &rt);
 # ifndef __ASSUME_POSIX_TIMERS
        if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (ret, err), 0))
@@ -144,20 +148,20 @@ __pthread_cond_timedwait (
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
 
       /* Wait until woken by signal or broadcast.  */
       err = lll_futex_timed_wait (&cond->__data.__futex,
-                                 futex_val, &rt);
+                                 futex_val, &rt, pshared);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_mutex_lock(cond->__data.__lock);
+      lll_lock (cond->__data.__lock, pshared);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -187,17 +191,17 @@ __pthread_cond_timedwait (
 
  bc_out:
 
-  cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+  cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
 
   /* If pthread_cond_destroy was called on this variable already,
      notify the pthread_cond_destroy caller all waiters have left
      and it can be successfully destroyed.  */
   if (cond->__data.__total_seq == -1ULL
-      && cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
-    lll_futex_wake (&cond->__data.__nwaiters, 1);
+      && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
 
   /* We are done with the condvar.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);
@@ -207,4 +211,5 @@ __pthread_cond_timedwait (
 
   return err ?: result;
 }
+
 weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
index 79245b7..2fac02d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -41,38 +41,46 @@ __condvar_cleanup (void *arg)
   struct _condvar_cleanup_buffer *cbuffer =
     (struct _condvar_cleanup_buffer *) arg;
   unsigned int destroying;
+  int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l)
+               ? LLL_SHARED : LLL_PRIVATE;
 
   /* We are going to modify shared data.  */
-  lll_mutex_lock (cbuffer->cond->__data.__lock);
+  lll_lock (cbuffer->cond->__data.__lock, pshared);
 
   if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
     {
       /* This thread is not waiting anymore.  Adjust the sequence counters
-        appropriately.  */
-      ++cbuffer->cond->__data.__wakeup_seq;
+        appropriately.  We do not increment WAKEUP_SEQ if this would
+        bump it over the value of TOTAL_SEQ.  This can happen if a thread
+        was woken and then canceled.  */
+      if (cbuffer->cond->__data.__wakeup_seq
+         < cbuffer->cond->__data.__total_seq)
+       {
+         ++cbuffer->cond->__data.__wakeup_seq;
+         ++cbuffer->cond->__data.__futex;
+       }
       ++cbuffer->cond->__data.__woken_seq;
-      ++cbuffer->cond->__data.__futex;
     }
 
-  cbuffer->cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+  cbuffer->cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
 
   /* If pthread_cond_destroy was called on this variable already,
      notify the pthread_cond_destroy caller all waiters have left
      and it can be successfully destroyed.  */
   destroying = 0;
   if (cbuffer->cond->__data.__total_seq == -1ULL
-      && cbuffer->cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
+      && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
     {
-      lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1);
+      lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared);
       destroying = 1;
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cbuffer->cond->__data.__lock);
+  lll_unlock (cbuffer->cond->__data.__lock, pshared);
 
   /* Wake everybody to make sure no condvar signal gets lost.  */
   if (! destroying)
-    lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX);
+    lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared);
 
   /* Get the mutex before returning unless asynchronous cancellation
      is in effect.  */
@@ -88,22 +96,24 @@ __pthread_cond_wait (
   struct _pthread_cleanup_buffer buffer;
   struct _condvar_cleanup_buffer cbuffer;
   int err;
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+               ? LLL_SHARED : LLL_PRIVATE;
 
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Now we can release the mutex.  */
   err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (__builtin_expect (err, 0))
     {
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
       return err;
     }
 
   /* We have one new user of the condvar.  */
   ++cond->__data.__total_seq;
   ++cond->__data.__futex;
-  cond->__data.__nwaiters += 1 << COND_CLOCK_BITS;
+  cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
 
   /* Remember the mutex we are using here.  If there is already a
      different address store this is a bad user bug.  Do not store
@@ -132,19 +142,19 @@ __pthread_cond_wait (
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
 
       /* Wait until woken by signal or broadcast.  */
-      lll_futex_wait (&cond->__data.__futex, futex_val);
+      lll_futex_wait (&cond->__data.__futex, futex_val, pshared);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_mutex_lock (cond->__data.__lock);
+      lll_lock (cond->__data.__lock, pshared);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -160,17 +170,17 @@ __pthread_cond_wait (
 
  bc_out:
 
-  cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+  cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
 
   /* If pthread_cond_destroy was called on this variable already,
      notify the pthread_cond_destroy caller all waiters have left
      and it can be successfully destroyed.  */
   if (cond->__data.__total_seq == -1ULL
-      && cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
-    lll_futex_wake (&cond->__data.__nwaiters, 1);
+      && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
 
   /* We are done with the condvar.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);
@@ -178,4 +188,5 @@ __pthread_cond_wait (
   /* Get the mutex before returning.  */
   return __pthread_mutex_cond_lock (mutex);
 }
+
 weak_alias(__pthread_cond_wait, pthread_cond_wait)
index fc16bc5..57bb6b9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -22,7 +22,7 @@
 
 
 
-static lll_lock_t once_lock = LLL_LOCK_INITIALIZER;
+static int once_lock = LLL_LOCK_INITIALIZER;
 
 
 int
@@ -35,7 +35,7 @@ __pthread_once (
      object.  */
   if (*once_control == PTHREAD_ONCE_INIT)
     {
-      lll_lock (once_lock);
+      lll_lock (once_lock, LLL_PRIVATE);
 
       /* XXX This implementation is not complete.  It doesn't take
         cancelation and fork into account.  */
@@ -46,7 +46,7 @@ __pthread_once (
          *once_control = !PTHREAD_ONCE_INIT;
        }
 
-      lll_unlock (once_lock);
+      lll_unlock (once_lock, LLL_PRIVATE);
     }
 
   return 0;
index 2fdcc49..dc00f2a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
 
 /* Acquire read lock for RWLOCK.  */
 int
-__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
+__pthread_rwlock_rdlock (
+     pthread_rwlock_t *rwlock)
 {
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -39,7 +40,7 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
       if (rwlock->__data.__writer == 0
          /* ...and if either no writer is waiting or we prefer readers.  */
          && (!rwlock->__data.__nr_writers_queued
-             || rwlock->__data.__flags == 0))
+             || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
        {
          /* Increment the reader counter.  Avoid overflow.  */
          if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
@@ -73,19 +74,20 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
       int waitval = rwlock->__data.__readers_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer to finish.  */
-      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval);
+      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
+                     rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       --rwlock->__data.__nr_readers_queued;
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
index 8503788..3daefc7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_mutex_lock(rwlock->__data.__lock);
+  lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -43,7 +43,7 @@ pthread_rwlock_timedrdlock (
       if (rwlock->__data.__writer == 0
          /* ...and if either no writer is waiting or we prefer readers.  */
          && (!rwlock->__data.__nr_writers_queued
-             || rwlock->__data.__flags == 0))
+             || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
        {
          /* Increment the reader counter.  Avoid overflow.  */
          if (++rwlock->__data.__nr_readers == 0)
@@ -110,14 +110,14 @@ pthread_rwlock_timedrdlock (
       int waitval = rwlock->__data.__readers_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer to finish.  */
       err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup,
-                                 waitval, &rt);
+                                 waitval, &rt, rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       --rwlock->__data.__nr_readers_queued;
 
@@ -131,7 +131,7 @@ pthread_rwlock_timedrdlock (
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
index d9caa85..e6fcb16 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -100,14 +100,14 @@ pthread_rwlock_timedwrlock (
       int waitval = rwlock->__data.__writer_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer or reader(s) to finish.  */
       err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup,
-                                 waitval, &rt);
+                                 waitval, &rt, rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* To start over again, remove the thread from the writer list.  */
       --rwlock->__data.__nr_writers_queued;
@@ -121,7 +121,7 @@ pthread_rwlock_timedwrlock (
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
index 9cae8b6..a7ef71a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -27,7 +27,7 @@
 int
 __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
 {
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
   if (rwlock->__data.__writer)
     rwlock->__data.__writer = 0;
   else
@@ -37,19 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
       if (rwlock->__data.__nr_writers_queued)
        {
          ++rwlock->__data.__writer_wakeup;
-         lll_mutex_unlock (rwlock->__data.__lock);
-         lll_futex_wake (&rwlock->__data.__writer_wakeup, 1);
+         lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+         lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
+                         rwlock->__data.__shared);
          return 0;
        }
       else if (rwlock->__data.__nr_readers_queued)
        {
          ++rwlock->__data.__readers_wakeup;
-         lll_mutex_unlock (rwlock->__data.__lock);
-         lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX);
+         lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+         lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
+                         rwlock->__data.__shared);
          return 0;
        }
     }
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
   return 0;
 }
 
index 1b9186f..81e6daa 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
 
 /* Acquire write lock for RWLOCK.  */
 int
-__pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
+__pthread_rwlock_wrlock (
+     pthread_rwlock_t *rwlock)
 {
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -64,20 +65,21 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
       int waitval = rwlock->__data.__writer_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer or reader(s) to finish.  */
-      lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval);
+      lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
+                     rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* To start over again, remove the thread from the writer list.  */
       --rwlock->__data.__nr_writers_queued;
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
index 62082f4..7118f8a 100644 (file)
@@ -21,7 +21,8 @@
 
 
 int
-pthread_spin_destroy (pthread_spinlock_t *lock)
+pthread_spin_destroy (
+     pthread_spinlock_t *lock)
 {
   /* Nothing to do.  */
   return 0;
index 8ec382f..aebdbd2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 # define INLINE_SETXID_SYSCALL(name, nr, args...) \
   ({                                                                   \
     int __result;                                                      \
-    if (__builtin_expect (__libc_pthread_functions.ptr__nptl_setxid    \
-                         != NULL, 0))                                  \
+    if (__builtin_expect (__libc_pthread_functions_init, 0))           \
       {                                                                        \
        struct xid_command __cmd;                                       \
        __cmd.syscall_no = __NR_##name;                                 \
        __SETXID_##nr (__cmd, args);                                    \
-       __result = __libc_pthread_functions.ptr__nptl_setxid (&__cmd);  \
+       __result = PTHFCT_CALL (ptr__nptl_setxid, (&__cmd));            \
        }                                                               \
     else                                                               \
       __result = INLINE_SYSCALL (name, nr, args);                      \
index 54b5d2d..20cff89 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
-/* This is tricky.  GCC doesn't like #include_next in the primary
-   source file and even if it did, the first #include_next is this
-   exact file anyway.  */
 #ifndef LIBC_SIGACTION
 
 #include <pthreadP.h>
 
 /* We use the libc implementation but we tell it to not allow
    SIGCANCEL or SIGTIMER to be handled.  */
-# define LIBC_SIGACTION        1
-
-# include <sigaction.c>
+#define LIBC_SIGACTION 1
+#include <sigaction.c>
 
 int
 sigaction (int sig, const struct sigaction *act, struct sigaction *oact);
-
 int
 __sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
 {
@@ -47,6 +43,7 @@ __sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
 libc_hidden_proto(sigaction)
 weak_alias (__sigaction, sigaction)
 libc_hidden_weak(sigaction)
+
 #else
 
 # include_next <sigaction.c>
index fbe458f..eed75e2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
diff --git a/libpthread/nptl/sysdeps/pthread/tpp.c b/libpthread/nptl/sysdeps/pthread/tpp.c
new file mode 100644 (file)
index 0000000..0325010
--- /dev/null
@@ -0,0 +1,172 @@
+/* Thread Priority Protect helpers.
+   Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <assert.h>
+#include <atomic.h>
+#include <errno.h>
+#include <pthreadP.h>
+#include <sched.h>
+#include <stdlib.h>
+
+
+int __sched_fifo_min_prio = -1;
+int __sched_fifo_max_prio = -1;
+
+void
+__init_sched_fifo_prio (void)
+{
+  __sched_fifo_max_prio = sched_get_priority_max (SCHED_FIFO);
+  atomic_write_barrier ();
+  __sched_fifo_min_prio = sched_get_priority_min (SCHED_FIFO);
+}
+
+int
+__pthread_tpp_change_priority (int previous_prio, int new_prio)
+{
+  struct pthread *self = THREAD_SELF;
+  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
+
+  if (tpp == NULL)
+    {
+      if (__sched_fifo_min_prio == -1)
+       __init_sched_fifo_prio ();
+
+      size_t size = sizeof *tpp;
+      size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1)
+             * sizeof (tpp->priomap[0]);
+      tpp = calloc (size, 1);
+      if (tpp == NULL)
+       return ENOMEM;
+      tpp->priomax = __sched_fifo_min_prio - 1;
+      THREAD_SETMEM (self, tpp, tpp);
+    }
+
+  assert (new_prio == -1
+         || (new_prio >= __sched_fifo_min_prio
+             && new_prio <= __sched_fifo_max_prio));
+  assert (previous_prio == -1
+         || (previous_prio >= __sched_fifo_min_prio
+             && previous_prio <= __sched_fifo_max_prio));
+
+  int priomax = tpp->priomax;
+  int newpriomax = priomax;
+  if (new_prio != -1)
+    {
+      if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0)
+       return EAGAIN;
+      ++tpp->priomap[new_prio - __sched_fifo_min_prio];
+      if (new_prio > priomax)
+       newpriomax = new_prio;
+    }
+
+  if (previous_prio != -1)
+    {
+      if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0
+         && priomax == previous_prio
+         && previous_prio > new_prio)
+       {
+         int i;
+         for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i)
+           if (tpp->priomap[i - __sched_fifo_min_prio])
+             break;
+         newpriomax = i;
+       }
+    }
+
+  if (priomax == newpriomax)
+    return 0;
+
+  lll_lock (self->lock, LLL_PRIVATE);
+
+  tpp->priomax = newpriomax;
+
+  int result = 0;
+
+  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
+    {
+      if (__sched_getparam (self->tid, &self->schedparam) != 0)
+       result = errno;
+      else
+       self->flags |= ATTR_FLAG_SCHED_SET;
+    }
+
+  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
+    {
+      self->schedpolicy = __sched_getscheduler (self->tid);
+      if (self->schedpolicy == -1)
+       result = errno;
+      else
+       self->flags |= ATTR_FLAG_POLICY_SET;
+    }
+
+  if (result == 0)
+    {
+      struct sched_param sp = self->schedparam;
+      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
+       {
+         if (sp.sched_priority < newpriomax)
+           sp.sched_priority = newpriomax;
+
+         if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
+           result = errno;
+       }
+    }
+
+  lll_unlock (self->lock, LLL_PRIVATE);
+
+  return result;
+}
+
+int
+__pthread_current_priority (void)
+{
+  struct pthread *self = THREAD_SELF;
+  if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
+      == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
+    return self->schedparam.sched_priority;
+
+  int result = 0;
+
+  lll_lock (self->lock, LLL_PRIVATE);
+
+  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
+    {
+      if (__sched_getparam (self->tid, &self->schedparam) != 0)
+       result = -1;
+      else
+       self->flags |= ATTR_FLAG_SCHED_SET;
+    }
+
+  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
+    {
+      self->schedpolicy = __sched_getscheduler (self->tid);
+      if (self->schedpolicy == -1)
+       result = -1;
+      else
+       self->flags |= ATTR_FLAG_POLICY_SET;
+    }
+
+  if (result != -1)
+    result = self->schedparam.sched_priority;
+
+  lll_unlock (self->lock, LLL_PRIVATE);
+
+  return result;
+}
index 8b1f244..e058604 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2005, 2006, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>.
 
 #include <stdio.h>
 #include <unwind.h>
 #include <pthreadP.h>
+#include <sysdep.h>
+#include <libgcc_s.h>
 
-#define __libc_dlopen(x)       dlopen(x, (RTLD_LOCAL | RTLD_LAZY))
-#define __libc_dlsym           dlsym
-
+static void *libgcc_s_handle;
 static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
 static _Unwind_Reason_Code (*libgcc_s_personality)
   (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
@@ -34,15 +34,23 @@ static _Unwind_Reason_Code (*libgcc_s_forcedunwind)
 static _Unwind_Word (*libgcc_s_getcfa) (struct _Unwind_Context *);
 
 void
+__attribute_noinline__
 pthread_cancel_init (void)
 {
-  void *resume, *personality, *forcedunwind, *getcfa;
+  void *resume;
+  void *personality;
+  void *forcedunwind;
+  void *getcfa;
   void *handle;
 
-  if (__builtin_expect (libgcc_s_getcfa != NULL, 1))
-    return;
+  if (__builtin_expect (libgcc_s_handle != NULL, 1))
+    {
+      /* Force gcc to reload all values.  */
+      __asm__ volatile ("" ::: "memory");
+      return;
+    }
 
-  handle = __libc_dlopen ("libgcc_s.so.1");
+  handle = __libc_dlopen (LIBGCC_S_SO);
 
   if (handle == NULL
       || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
@@ -55,22 +63,46 @@ pthread_cancel_init (void)
 #endif
       )
   {
-    printf("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
+    printf (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");
     abort();
   }
 
+  PTR_MANGLE (resume);
   libgcc_s_resume = resume;
+  PTR_MANGLE (personality);
   libgcc_s_personality = personality;
+  PTR_MANGLE (forcedunwind);
   libgcc_s_forcedunwind = forcedunwind;
+  PTR_MANGLE (getcfa);
   libgcc_s_getcfa = getcfa;
+  /* Make sure libgcc_s_handle is written last.  Otherwise,
+     pthread_cancel_init might return early even when the pointer the
+     caller is interested in is not initialized yet.  */
+  atomic_write_barrier ();
+  libgcc_s_handle = handle;
+}
+
+void
+__libc_freeres_fn_section
+__unwind_freeres (void)
+{
+  void *handle = libgcc_s_handle;
+  if (handle != NULL)
+    {
+      libgcc_s_handle = NULL;
+      __libc_dlclose (handle);
+    }
 }
 
 void
 _Unwind_Resume (struct _Unwind_Exception *exc)
 {
-  if (__builtin_expect (libgcc_s_resume == NULL, 0))
+  if (__builtin_expect (libgcc_s_handle == NULL, 0))
     pthread_cancel_init ();
-  libgcc_s_resume (exc);
+
+  void (*resume) (struct _Unwind_Exception *exc) = libgcc_s_resume;
+  PTR_DEMANGLE (resume);
+  resume (exc);
 }
 
 _Unwind_Reason_Code
@@ -79,25 +111,37 @@ __gcc_personality_v0 (int version, _Unwind_Action actions,
                       struct _Unwind_Exception *ue_header,
                       struct _Unwind_Context *context)
 {
-  if (__builtin_expect (libgcc_s_personality == NULL, 0))
+  if (__builtin_expect (libgcc_s_handle == NULL, 0))
     pthread_cancel_init ();
-  return libgcc_s_personality (version, actions, exception_class,
-                              ue_header, context);
+
+  _Unwind_Reason_Code (*personality)
+    (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+     struct _Unwind_Context *) = libgcc_s_personality;
+  PTR_DEMANGLE (personality);
+  return personality (version, actions, exception_class, ue_header, context);
 }
 
 _Unwind_Reason_Code
 _Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
                      void *stop_argument)
 {
-  if (__builtin_expect (libgcc_s_forcedunwind == NULL, 0))
+  if (__builtin_expect (libgcc_s_handle == NULL, 0))
     pthread_cancel_init ();
-  return libgcc_s_forcedunwind (exc, stop, stop_argument);
+
+  _Unwind_Reason_Code (*forcedunwind)
+    (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *)
+    = libgcc_s_forcedunwind;
+  PTR_DEMANGLE (forcedunwind);
+  return forcedunwind (exc, stop, stop_argument);
 }
 
 _Unwind_Word
 _Unwind_GetCFA (struct _Unwind_Context *context)
 {
-  if (__builtin_expect (libgcc_s_getcfa == NULL, 0))
+  if (__builtin_expect (libgcc_s_handle == NULL, 0))
     pthread_cancel_init ();
-  return libgcc_s_getcfa (context);
+
+  _Unwind_Word (*getcfa) (struct _Unwind_Context *) = libgcc_s_getcfa;
+  PTR_DEMANGLE (getcfa);
+  return getcfa (context);
 }
index 018d2fd..3ca2fd8 100644 (file)
@@ -21,6 +21,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <unwind.h>
+#include <libgcc_s.h>
 
 static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
 static _Unwind_Reason_Code (*libgcc_s_personality)
@@ -33,17 +34,16 @@ void abort(void);
 static void
 init (void)
 {
-  void *resume = NULL;
-  void *personality = NULL;
+  void *resume, *personality;
   void *handle;
-  resume = personality = NULL; /* make gcc silent */
-  handle = dlopen ("libgcc_s.so.1", (RTLD_LOCAL | RTLD_LAZY));
+  resume = personality = NULL;
+  handle = dlopen (LIBGCC_S_SO, (RTLD_LOCAL | RTLD_LAZY));
 
   if (handle == NULL
       || (resume = dlsym (handle, "_Unwind_Resume")) == NULL
       || (personality = dlsym (handle, "__gcc_personality_v0")) == NULL)
   {
-    printf("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
+    printf (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");
     abort();
   }
 
index 539789a..753b72b 100644 (file)
@@ -9,3 +9,7 @@ CLEANUP_JMP_BUF         offsetof (struct pthread, cleanup_jmp_buf)
 MULTIPLE_THREADS_OFFSET        offsetof (struct pthread, header.multiple_threads)
 TLS_PRE_TCB_SIZE       sizeof (struct pthread)
 MUTEX_FUTEX            offsetof (pthread_mutex_t, __data.__lock)
+POINTER_GUARD          offsetof (tcbhead_t, pointer_guard)
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX          offsetof (struct pthread, header.private_futex)
+#endif
index 5e5ce12..2c538ed 100644 (file)
@@ -1,5 +1,5 @@
 /* Definition for thread-local data handling.  NPTL/SH version.
-   Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 # include <stdbool.h>
 # include <stddef.h>
 # include <stdint.h>
+# include <stdlib.h>
+# include <list.h>
+# include <sysdep.h>
+# include <bits/kernel-features.h>
 
 /* Type for the dtv.  */
 typedef union dtv
@@ -39,7 +43,7 @@ typedef union dtv
 typedef struct
 {
   dtv_t *dtv;
-  void *private;
+  uintptr_t pointer_guard;
 } tcbhead_t;
 
 # define TLS_MULTIPLE_THREADS_IN_TCB 1
@@ -52,9 +56,9 @@ typedef struct
 /* We require TLS support in the tools.  */
 #define HAVE_TLS_SUPPORT
 #define HAVE___THREAD   1
-#define HAVE_TLS_MODEL_ATTRIBUTE       1
+#define HAVE_TLS_MODEL_ATTRIBUTE       1
 /* Signal that TLS support is available.  */
-# define USE_TLS       1
+# define USE_TLS       1
 
 #ifndef __ASSEMBLER__
 
@@ -115,9 +119,9 @@ typedef struct
         struct pthread *self = thread_self();
    do not get optimized away.  */
 # define THREAD_SELF \
-  ({ struct pthread *__thread_self;                                                  \
-     __asm ("stc gbr,%0" : "=r" (__thread_self));                                    \
-     __thread_self - 1;})
+  ({ struct pthread *__self;                                                 \
+     __asm ("stc gbr,%0" : "=r" (__self));                                   \
+     __self - 1;})
 
 /* Magic for libthread_db to know how to do THREAD_SELF.  */
 # define DB_THREAD_SELF \
@@ -137,6 +141,42 @@ typedef struct
 # define THREAD_SETMEM_NC(descr, member, idx, value) \
     descr->member[idx] = (value)
 
+#define THREAD_GET_POINTER_GUARD() \
+  ({ tcbhead_t *__tcbp;                                                              \
+     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));                        \
+     __tcbp->pointer_guard;})
+ #define THREAD_SET_POINTER_GUARD(value) \
+  ({ tcbhead_t *__tcbp;                                                              \
+     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));                        \
+     __tcbp->pointer_guard = (value);})
+#define THREAD_COPY_POINTER_GUARD(descr) \
+  ({ tcbhead_t *__tcbp;                                                              \
+     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));                        \
+     ((tcbhead_t *) (descr + 1))->pointer_guard        = __tcbp->pointer_guard;})
+
+/* Get and set the global scope generation counter in struct pthread.  */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED   1
+#define THREAD_GSCOPE_FLAG_WAIT   2
+#define THREAD_GSCOPE_RESET_FLAG() \
+  do                                                                        \
+    { int __res                                                                     \
+       = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag,             \
+                              THREAD_GSCOPE_FLAG_UNUSED);                   \
+      if (__res == THREAD_GSCOPE_FLAG_WAIT)                                 \
+       lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE);   \
+    }                                                                       \
+  while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+  do                                                                        \
+    {                                                                       \
+      THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;            \
+      atomic_write_barrier ();                                              \
+    }                                                                       \
+  while (0)
+#define THREAD_GSCOPE_WAIT() \
+  GL(dl_wait_lookup_done) ()
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* tls.h */
index 237f975..923af8a 100644 (file)
@@ -2,5 +2,6 @@
 #include <tls.h>
 
 MULTIPLE_THREADS_OFFSET                offsetof (tcbhead_t, multiple_threads)
+POINTER_GUARD                  offsetof (tcbhead_t, pointer_guard)
 PID                            offsetof (struct pthread, pid)
 TID                            offsetof (struct pthread, tid)
index e5d27fb..e93542c 100644 (file)
@@ -1,5 +1,5 @@
 /* Definitions for thread-local data handling.  NPTL/sparc version.
-   Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -26,6 +26,7 @@
 # include <stdint.h>
 # include <stdlib.h>
 # include <list.h>
+# include <bits/kernel-features.h>
 
 /* Type for the dtv.  */
 typedef union dtv
@@ -45,8 +46,18 @@ typedef struct
   dtv_t *dtv;
   void *self;
   int multiple_threads;
+#if __WORDSIZE == 64
+  int gscope_flag;
+#endif
   uintptr_t sysinfo;
   uintptr_t stack_guard;
+  uintptr_t pointer_guard;
+#if __WORDSIZE != 64
+  int gscope_flag;
+#endif
+#ifndef __ASSUME_PRIVATE_FUTEX
+  int private_futex;
+#endif
 } tcbhead_t;
 
 #else /* __ASSEMBLER__ */
@@ -59,12 +70,15 @@ typedef struct
 #define HAVE_TLS_MODEL_ATTRIBUTE 1
 
 /* Signal that TLS support is available.  */
-#define USE_TLS        1
+#define USE_TLS        1
 
 #ifndef __ASSEMBLER__
 /* Get system call information.  */
 # include <sysdep.h>
 
+/* Get the thread descriptor definition.  */
+# include <descr.h>
+
 register struct pthread *__thread_self __asm__("%g7");
 
 /* This is the size of the initial TCB.  Can't be just sizeof (tcbhead_t),
@@ -81,9 +95,6 @@ register struct pthread *__thread_self __asm__("%g7");
 /* Alignment requirements for the TCB.  */
 # define TLS_TCB_ALIGN __alignof__ (struct pthread)
 
-/* Get the thread descriptor definition.  */
-# include <descr.h>
-
 /* The TCB can have any size and the memory following the address the
    thread pointer points to is unspecified.  Allocate the TCB there.  */
 # define TLS_TCB_AT_TP 1
@@ -134,6 +145,37 @@ register struct pthread *__thread_self __asm__("%g7");
   ((descr)->header.stack_guard \
    = THREAD_GETMEM (THREAD_SELF, header.stack_guard))
 
+/* Get/set the stack guard field in TCB head.  */
+#define THREAD_GET_POINTER_GUARD() \
+  THREAD_GETMEM (THREAD_SELF, header.pointer_guard)
+#define THREAD_SET_POINTER_GUARD(value) \
+  THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
+# define THREAD_COPY_POINTER_GUARD(descr) \
+  ((descr)->header.pointer_guard = THREAD_GET_POINTER_GUARD ())
+
+/* Get and set the global scope generation counter in struct pthread.  */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED   1
+#define THREAD_GSCOPE_FLAG_WAIT   2
+#define THREAD_GSCOPE_RESET_FLAG() \
+  do                                                                        \
+    { int __res                                                                     \
+       = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag,             \
+                              THREAD_GSCOPE_FLAG_UNUSED);                   \
+      if (__res == THREAD_GSCOPE_FLAG_WAIT)                                 \
+       lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE);   \
+    }                                                                       \
+  while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+  do                                                                        \
+    {                                                                       \
+      THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED;            \
+      atomic_write_barrier ();                                              \
+    }                                                                       \
+  while (0)
+#define THREAD_GSCOPE_WAIT() \
+  GL(dl_wait_lookup_done) ()
+
 #endif /* !ASSEMBLER */
 
 #endif /* tls.h */
index 6c491b6..fa73a8a 100644 (file)
@@ -10,6 +10,7 @@ libpthread_CSRC = pthread_attr_getaffinity.c                  \
                  pthread_getcpuclockid.c pthread_kill.c                \
                  pthread_mutex_cond_lock.c pthread_setaffinity.c       \
                  pthread_yield.c sem_post.c sem_timedwait.c            \
+                 pthread_sigqueue.c \
                  sem_trywait.c sem_wait.c pt-fork.c                    \
                  sigtimedwait.c sigwaitinfo.c sigwait.c pt-sleep.c
 
@@ -148,7 +149,10 @@ headers_clean-y += nptl_linux_headers_clean
 CFLAGS-lowlevelbarrier.c = -S
 CFLAGS-lowlevelcond.c = -S
 CFLAGS-lowlevelrwlock.c = -S
+CFLAGS-lowlevelrobustlock.c = -S
 CFLAGS-unwindbuf.c = -S
+CFLAGS-structsem.c = -S
+CFLAGS-pthread-pi-defines.c = -S
 
 $(PTHREAD_LINUX_OUT)/lowlevelbarrier.c: $(PTHREAD_LINUX_DIR)/lowlevelbarrier.sym
        $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
@@ -159,9 +163,19 @@ $(PTHREAD_LINUX_OUT)/lowlevelcond.c: $(PTHREAD_LINUX_DIR)/lowlevelcond.sym
 $(PTHREAD_LINUX_OUT)/lowlevelrwlock.c: $(PTHREAD_LINUX_DIR)/lowlevelrwlock.sym
        $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
 
+$(PTHREAD_LINUX_OUT)/lowlevelrobustlock.c: $(PTHREAD_LINUX_DIR)/lowlevelrobustlock.sym
+       $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+
 $(PTHREAD_LINUX_OUT)/unwindbuf.c: $(PTHREAD_LINUX_DIR)/unwindbuf.sym
        $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
 
+$(PTHREAD_LINUX_OUT)/structsem.c: $(PTHREAD_LINUX_DIR)/structsem.sym
+       $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+
+$(PTHREAD_LINUX_OUT)/pthread-pi-defines.c: $(PTHREAD_LINUX_DIR)/pthread-pi-defines.sym
+       $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+
+
 $(PTHREAD_LINUX_OUT)/lowlevelbarrier.s: $(PTHREAD_LINUX_OUT)/lowlevelbarrier.c
        $(compile.c)
 
@@ -171,9 +185,19 @@ $(PTHREAD_LINUX_OUT)/lowlevelcond.s: $(PTHREAD_LINUX_OUT)/lowlevelcond.c
 $(PTHREAD_LINUX_OUT)/lowlevelrwlock.s: $(PTHREAD_LINUX_OUT)/lowlevelrwlock.c
        $(compile.c)
 
+$(PTHREAD_LINUX_OUT)/lowlevelrobustlock.s: $(PTHREAD_LINUX_OUT)/lowlevelrobustlock.c
+       $(compile.c)
+
 $(PTHREAD_LINUX_OUT)/unwindbuf.s: $(PTHREAD_LINUX_OUT)/unwindbuf.c
        $(compile.c)
 
+$(PTHREAD_LINUX_OUT)/structsem.s: $(PTHREAD_LINUX_OUT)/structsem.c
+       $(compile.c)
+
+$(PTHREAD_LINUX_OUT)/pthread-pi-defines.s: $(PTHREAD_LINUX_OUT)/pthread-pi-defines.c
+       $(compile.c)
+
+
 $(PTHREAD_LINUX_OUT)/lowlevelbarrier.h: $(PTHREAD_LINUX_OUT)/lowlevelbarrier.s
        $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
 
@@ -183,13 +207,25 @@ $(PTHREAD_LINUX_OUT)/lowlevelcond.h: $(PTHREAD_LINUX_OUT)/lowlevelcond.s
 $(PTHREAD_LINUX_OUT)/lowlevelrwlock.h: $(PTHREAD_LINUX_OUT)/lowlevelrwlock.s
        $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
 
+$(PTHREAD_LINUX_OUT)/lowlevelrobustlock.h: $(PTHREAD_LINUX_OUT)/lowlevelrobustlock.s
+       $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+
 $(PTHREAD_LINUX_OUT)/unwindbuf.h: $(PTHREAD_LINUX_OUT)/unwindbuf.s
        $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
 
+$(PTHREAD_LINUX_OUT)/structsem.h: $(PTHREAD_LINUX_OUT)/structsem.s
+       $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+
+$(PTHREAD_LINUX_OUT)/pthread-pi-defines.h: $(PTHREAD_LINUX_OUT)/pthread-pi-defines.s
+       $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+
 nptl_linux_headers: $(PTHREAD_LINUX_OUT)/lowlevelbarrier.h \
                                        $(PTHREAD_LINUX_OUT)/lowlevelcond.h \
                                        $(PTHREAD_LINUX_OUT)/lowlevelrwlock.h \
-                                       $(PTHREAD_LINUX_OUT)/unwindbuf.h
+                                       $(PTHREAD_LINUX_OUT)/lowlevelrobustlock.h \
+                                       $(PTHREAD_LINUX_OUT)/unwindbuf.h \
+                                       $(PTHREAD_LINUX_OUT)/structsem.h \
+                                       $(PTHREAD_LINUX_OUT)/pthread-pi-defines.h
 
 HEADERS_BITS_PTHREAD     := $(notdir $(wildcard $(PTHREAD_LINUX_DIR)/bits/*.h))
 ALL_HEADERS_BITS_PTHREAD := $(addprefix include/bits/,$(HEADERS_BITS_PTHREAD))
@@ -201,7 +237,10 @@ nptl_linux_headers_clean:
        $(do_rm) $(addprefix $(PTHREAD_LINUX_OUT)/lowlevelbarrier., c h s) \
        $(addprefix $(PTHREAD_LINUX_OUT)/lowlevelcond., c h s) \
        $(addprefix $(PTHREAD_LINUX_OUT)/lowlevelrwlock., c h s) \
-       $(addprefix $(PTHREAD_LINUX_OUT)/unwindbuf., c h s)
+       $(addprefix $(PTHREAD_LINUX_OUT)/lowlevelrobustlock., c h s) \
+       $(addprefix $(PTHREAD_LINUX_OUT)/unwindbuf., c h s) \
+       $(addprefix $(PTHREAD_LINUX_OUT)/structsem., c h s) \
+       $(addprefix $(PTHREAD_LINUX_OUT)/pthread-pi-defines., c h s)
 
 
 nptl_linux_clean:
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions
deleted file mode 100644 (file)
index 437c4da..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-libpthread {
-  GLIBC_2.3.3 {
-    # Changed PTHREAD_STACK_MIN.
-    pthread_attr_setstack; pthread_attr_setstacksize;
-  }
-}
-librt {
-  GLIBC_2.3.3 {
-    # Changed timer_t.
-    timer_create; timer_delete; timer_getoverrun; timer_gettime;
-    timer_settime;
-  }
-}
index e071878..a7c9740 100644 (file)
@@ -1,5 +1,5 @@
 /* Minimum guaranteed maximum values for system limits.  Linux/Alpha version.
-   Copyright (C) 1993-1998,2000,2002,2003,2004 Free Software Foundation, Inc.
+   Copyright (C) 1993-1998,2000,2002-2004,2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -31,6 +31,9 @@
 #ifndef OPEN_MAX
 # define __undef_OPEN_MAX
 #endif
+#ifndef ARG_MAX
+# define __undef_ARG_MAX
+#endif
 
 /* The kernel sources contain a file with all the needed information.  */
 #include <linux/limits.h>
 # undef OPEN_MAX
 # undef __undef_OPEN_MAX
 #endif
+/* Have to remove ARG_MAX?  */
+#ifdef __undef_ARG_MAX
+# undef ARG_MAX
+# undef __undef_ARG_MAX
+#endif
 
 /* The number of data keys per process.  */
 #define _POSIX_THREAD_KEYS_MAX 128
@@ -87,3 +95,6 @@
 
 /* Maximum message queue priority level.  */
 #define MQ_PRIO_MAX            32768
+
+/* Maximum value the semaphore can have.  */
+#define SEM_VALUE_MAX   (2147483647)
index 0f3bf83..41c0be1 100644 (file)
@@ -1,5 +1,5 @@
 /* Machine-specific pthread type layouts.  Alpha version.
-   Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -65,7 +65,7 @@ typedef union
     int __kind;
     int __spins;
     __pthread_list_t __list;
-#define __PTHREAD_MUTEX_HAVE_PREV       1
+#define __PTHREAD_MUTEX_HAVE_PREV      1
   } __data;
   char __size[__SIZEOF_PTHREAD_MUTEX_T];
   long int __align;
@@ -126,9 +126,9 @@ typedef union
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
     int __writer;
-    int __pad1;
+    int __shared;
+    unsigned long int __pad1;
     unsigned long int __pad2;
-    unsigned long int __pad3;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
     unsigned int __flags;
index 6dadfda..be4469c 100644 (file)
@@ -26,9 +26,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
index fd4a7ca..b7f4de3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
 #include <sysdep.h>
+#include <bits/kernel-features.h>
 
 
-#define __NR_futex             394
 #define FUTEX_WAIT             0
 #define FUTEX_WAKE             1
 #define FUTEX_REQUEUE          3
 #define FUTEX_CMP_REQUEUE      4
-
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
-#define lll_futex_wait(futexp, val) \
+#define FUTEX_WAKE_OP          5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
+             & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif              
+#endif
+
+
+#define lll_futex_wait(futexp, val, private) \
+  lll_futex_timed_wait (futexp, val, NULL, private)
+
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), 0);                \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAIT, private),       \
+                             (val), (timespec));                             \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret;                 \
   })
 
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), (timespec));       \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE, private),       \
+                             (nr), 0);                                       \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret;                 \
   })
 
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+  do                                                                         \
+    {                                                                        \
+      int *__futexp = &(futexv);                                             \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
+      lll_futex_wake (__futexp, 1, private);                                 \
+    }                                                                        \
+  while (0)
+
+/* Returns non-zero if error happened, zero if success.  */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAKE, (nr), 0);                 \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret;                 \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+                             (nr_wake), (nr_move), (mutex), (val));          \
+    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,                               \
-                             (futexp), FUTEX_CMP_REQUEUE, (nr_wake),         \
-                             (nr_move), (mutex), (val));                     \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE_OP, private),    \
+                             (nr_wake), (nr_wake2), (futexp2),               \
+                             FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);                 \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
 
+
+
 static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
 }
-#define lll_mutex_trylock(lock)        __lll_mutex_trylock (&(lock))
+#define lll_trylock(lock)      __lll_trylock (&(lock))
 
 
 static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
 }
-#define lll_mutex_cond_trylock(lock)   __lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
+static inline int __attribute__((always_inline))
+__lll_robust_trylock(int *futex, int id)
+{
+  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
+}
+#define lll_robust_trylock(lock, id) \
+  __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+__lll_lock(int *futex, int private)
 {
   if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    __lll_lock_wait (futex);
+    {
+      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+       __lll_lock_wait_private (futex);
+      else
+       __lll_lock_wait (futex, private);
+    }
+}
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+static inline int __attribute__ ((always_inline))
+__lll_robust_lock (int *futex, int id, int private)
+{
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_lock_wait (futex, private);
+  return result;
 }
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 
 static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
   if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
-    __lll_lock_wait (futex);
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-       attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+                                int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+                                       int private) attribute_hidden;
 
 static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    result = __lll_timedlock_wait (futex, abstime);
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
+static inline int __attribute__ ((always_inline))
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+                       int id, int private)
 {
-  int val = atomic_exchange_rel (futex, 0);
-  if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1);
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
+  return result;
 }
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
 
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
-  (void) atomic_exchange_rel (futex, 0);
-  lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
 
+#define __lll_unlock(futex, private) \
+  (void)                                                       \
+    ({ int *__futex = (futex);                                 \
+       int __oldval = atomic_exchange_rel (__futex, 0);                \
+       if (__builtin_expect (__oldval > 1, 0))                 \
+        lll_futex_wake (__futex, 1, private);                  \
+    })
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
 
-#define lll_mutex_islocked(futex) \
-  (futex != 0)
 
+#define __lll_robust_unlock(futex, private) \
+  (void)                                                       \
+    ({ int *__futex = (futex);                                 \
+       int __oldval = atomic_exchange_rel (__futex, 0);                \
+       if (__builtin_expect (__oldval & FUTEX_WAITERS, 0))     \
+        lll_futex_wake (__futex, 1, private);                  \
+    })
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
 
-/* Our internal lock implementation is identical to the binary-compatible
-   mutex implementation. */
 
-/* Type for lock object.  */
-typedef int lll_lock_t;
+#define lll_islocked(futex) \
+  (futex != 0)
 
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER           (0)
 #define LLL_LOCK_INITIALIZER_LOCKED    (1)
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-   >1  -  taken by more users */
-
-#define lll_trylock(lock)      lll_mutex_trylock (lock)
-#define lll_lock(lock)         lll_mutex_lock (lock)
-#define lll_unlock(lock)       lll_mutex_unlock (lock)
-#define lll_islocked(lock)     lll_mutex_islocked (lock)
 
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
    afterwards. */
 #define lll_wait_tid(tid) \
-  do {                                 \
-    __typeof (tid) __tid;              \
-    while ((__tid = (tid)) != 0)       \
-      lll_futex_wait (&(tid), __tid);  \
+  do {                                                 \
+    __typeof (tid) __tid;                              \
+    while ((__tid = (tid)) != 0)                       \
+      lll_futex_wait (&(tid), __tid, LLL_SHARED);      \
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -192,26 +281,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;                                             \
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif /* lowlevellock.h */
index 79a3c47..0e7e979 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@ clear_once_control (void *arg)
   pthread_once_t *once_control = (pthread_once_t *) arg;
 
   *once_control = 0;
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 }
 
 int
@@ -72,7 +72,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
        break;
 
       /* Same generation, some other thread was faster. Wait.  */
-      lll_futex_wait (once_control, oldval);
+      lll_futex_wait (once_control, oldval, LLL_PRIVATE);
     }
 
   /* This thread is the first here.  Do the initialization.
@@ -88,7 +88,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
   atomic_increment (once_control);
 
   /* Wake up all other threads.  */
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 
   return 0;
 }
index 4922407..7049b36 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -55,6 +55,7 @@ __LABEL(name)                                                 \
        bne     a3, SYSCALL_ERROR_LABEL;                        \
 __LABEL($pseudo_ret)                                           \
        .subsection 2;                                          \
+       cfi_startproc;                                          \
 __LABEL($pseudo_cancel)                                                \
        subq    sp, 64, sp;                                     \
        cfi_def_cfa_offset(64);                                 \
@@ -90,12 +91,13 @@ __LABEL($multi_error)                                               \
        cfi_def_cfa_offset(0);                                  \
 __LABEL($syscall_error)                                                \
        SYSCALL_ERROR_HANDLER;                                  \
+       cfi_endproc;                                            \
        .previous
 
 # undef PSEUDO_END
 # define PSEUDO_END(sym)                                       \
-       .subsection 2;                                          \
        cfi_endproc;                                            \
+       .subsection 2;                                          \
        .size sym, .-sym
 
 # define SAVE_ARGS_0   /* Nothing.  */
@@ -142,7 +144,7 @@ __LABEL($syscall_error)                                             \
 extern int __local_multiple_threads attribute_hidden;
 #   define SINGLE_THREAD_P \
        __builtin_expect (__local_multiple_threads == 0, 1)
-#  elif defined(PIC)
+#  elif defined(__PIC__)
 #   define SINGLE_THREAD_P(reg)  ldl reg, __local_multiple_threads(gp) !gprel
 #  else
 #   define SINGLE_THREAD_P(reg)                                        \
@@ -167,3 +169,9 @@ extern int __local_multiple_threads attribute_hidden;
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index f0c3998..f4ed931 100644 (file)
@@ -42,5 +42,5 @@ PSEUDO (__vfork, vfork, 0)
 1:     ret
 
 PSEUDO_END (__vfork)
-hidden_def (__vfork)
+libc_hidden_def (__vfork)
 weak_alias (__vfork, vfork)
index 49a935a..b0586ea 100644 (file)
@@ -37,61 +37,21 @@ typedef uintmax_t uatomic_max_t;
 
 void __arm_link_error (void);
 
-#ifdef __thumb__
-
-/* Note that to allow efficient implementation the arguemtns are reversed
-   relative to atomic_exchange_acq.  */
-int __thumb_swpb (int newvalue, void *mem)
-  attribute_hidden;
-unsigned int __thumb_swp (unsigned int newvalue, void *mem)
-  attribute_hidden;
-unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *mem)
-  attribute_hidden;
-
-#define atomic_exchange_acq(mem, newvalue)                                   \
-  ({ __typeof (*mem) result;                                                 \
-     if (sizeof (*mem) == 1)                                                 \
-       result = __thumb_swpb (newvalue, mem);                                \
-     else if (sizeof (*mem) == 4)                                            \
-       result = __thumb_swp (newvalue, mem);                                 \
-     else                                                                    \
-       {                                                                     \
-        result = 0;                                                          \
-        abort ();                                                            \
-       }                                                                     \
-     result; })
-
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ((__typeof (oldval)) __thumb_cmpxchg (oldval, newval, mem))
-
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-
+#ifdef __thumb2__
+#define atomic_full_barrier() \
+     __asm__ __volatile__                                                    \
+            ("movw\tip, #0x0fa0\n\t"                                         \
+             "movt\tip, #0xffff\n\t"                                         \
+             "blx\tip"                                                       \
+             : : : "ip", "lr", "cc", "memory");
 #else
-/* ARM mode.  */
-
-#define atomic_exchange_acq(mem, newvalue)                                   \
-  ({ __typeof (*mem) _xchg_result;                                           \
-     if (sizeof (*mem) == 1)                                                 \
-       __asm__ __volatile__ ("swpb %0, %1, [%2]"                             \
-                            : "=&r,&r" (_xchg_result)                        \
-                            : "r,0" (newvalue), "r,r" (mem) : "memory");     \
-     else if (sizeof (*mem) == 4)                                            \
-       __asm__ __volatile__ ("swp %0, %1, [%2]"                                      \
-                            : "=&r,&r" (_xchg_result)                        \
-                            : "r,0" (newvalue), "r,r" (mem) : "memory");     \
-     else                                                                    \
-       {                                                                     \
-        _xchg_result = 0;                                                    \
-        abort ();                                                            \
-       }                                                                     \
-     _xchg_result; })
+#define atomic_full_barrier() \
+     __asm__ __volatile__                                                    \
+            ("mov\tip, #0xffff0fff\n\t"                                      \
+             "mov\tlr, pc\n\t"                                               \
+             "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)"                       \
+             : : : "ip", "lr", "cc", "memory");
+#endif
 
 /* Atomic compare and exchange.  This sequence relies on the kernel to
    provide a compare and exchange operation which is atomic on the
@@ -108,6 +68,9 @@ unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *me
    specify one to work around GCC PR rtl-optimization/21223.  Otherwise
    it may cause a_oldval or a_tmp to be moved to a different register.  */
 
+#ifdef __thumb2__
+/* Thumb-2 has ldrex/strex.  However it does not have barrier instructions,
+   so we still need to use the kernel helper.  */
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
   ({ register __typeof (oldval) a_oldval asm ("r0");                         \
      register __typeof (oldval) a_newval asm ("r1") = (newval);                      \
@@ -115,22 +78,45 @@ unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *me
      register __typeof (oldval) a_tmp asm ("r3");                            \
      register __typeof (oldval) a_oldval2 asm ("r4") = (oldval);             \
      __asm__ __volatile__                                                    \
-            ("0:\tldr\t%1,[%3]\n\t"                                          \
-             "cmp\t%1, %4\n\t"                                               \
+            ("0:\tldr\t%[tmp],[%[ptr]]\n\t"                                  \
+             "cmp\t%[tmp], %[old2]\n\t"                                      \
              "bne\t1f\n\t"                                                   \
-             "mov\t%0, %4\n\t"                                               \
-             "mov\t%1, #0xffff0fff\n\t"                                      \
+             "mov\t%[old], %[old2]\n\t"                                      \
+             "movw\t%[tmp], #0x0fc0\n\t"                                     \
+             "movt\t%[tmp], #0xffff\n\t"                                     \
+             "blx\t%[tmp]\n\t"                                               \
+             "bcc\t0b\n\t"                                                   \
+             "mov\t%[tmp], %[old2]\n\t"                                      \
+             "1:"                                                            \
+             : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)                   \
+             : [new] "r" (a_newval), [ptr] "r" (a_ptr),                      \
+               [old2] "r" (a_oldval2)                                        \
+             : "ip", "lr", "cc", "memory");                                  \
+     a_tmp; })
+#else
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ register __typeof (oldval) a_oldval asm ("r0");                         \
+     register __typeof (oldval) a_newval asm ("r1") = (newval);                      \
+     register __typeof (mem) a_ptr asm ("r2") = (mem);                       \
+     register __typeof (oldval) a_tmp asm ("r3");                            \
+     register __typeof (oldval) a_oldval2 asm ("r4") = (oldval);             \
+     __asm__ __volatile__                                                    \
+            ("0:\tldr\t%[tmp],[%[ptr]]\n\t"                                  \
+             "cmp\t%[tmp], %[old2]\n\t"                                      \
+             "bne\t1f\n\t"                                                   \
+             "mov\t%[old], %[old2]\n\t"                                      \
+             "mov\t%[tmp], #0xffff0fff\n\t"                                  \
              "mov\tlr, pc\n\t"                                               \
-             "add\tpc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"                   \
+             "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t"               \
              "bcc\t0b\n\t"                                                   \
-             "mov\t%1, %4\n\t"                                               \
+             "mov\t%[tmp], %[old2]\n\t"                                      \
              "1:"                                                            \
-             : "=&r" (a_oldval), "=&r" (a_tmp)                               \
-             : "r" (a_newval), "r" (a_ptr), "r" (a_oldval2)                  \
+             : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)                   \
+             : [new] "r" (a_newval), [ptr] "r" (a_ptr),                      \
+               [old2] "r" (a_oldval2)                                        \
              : "ip", "lr", "cc", "memory");                                  \
      a_tmp; })
+#endif
 
 #define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   ({ __arm_link_error (); oldval; })
-
-#endif /* __thumb__ */
index ea8d6a2..e1b115c 100644 (file)
@@ -19,6 +19,8 @@
 #ifndef _BITS_PTHREADTYPES_H
 #define _BITS_PTHREADTYPES_H   1
 
+#include <endian.h>
+
 #define __SIZEOF_PTHREAD_ATTR_T 36
 #define __SIZEOF_PTHREAD_MUTEX_T 24
 #define __SIZEOF_PTHREAD_MUTEXATTR_T 4
@@ -126,9 +128,21 @@ typedef union
     unsigned int __writer_wakeup;
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+    unsigned char __pad1;
+    unsigned char __pad2;
+    unsigned char __shared;
+    /* FLAGS must stay at this position in the structure to maintain
+       binary compatibility.  */
+    unsigned char __flags;
+#else
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
+    unsigned char __shared;
+    unsigned char __pad1;
+    unsigned char __pad2;
+#endif
     int __writer;
   } __data;
   char __size[__SIZEOF_PTHREAD_RWLOCK_T];
index 3fc647d..dadfac2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -27,9 +27,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   ((int) ((~0u) >> 1))
-
 
 typedef union
 {
index 74be188..60ccf77 100644 (file)
@@ -1,5 +1,5 @@
 /* low level locking for pthread library.  Generic futex-using version.
-   Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #include <lowlevellock.h>
 #include <sys/time.h>
 
+void
+__lll_lock_wait_private (int *futex)
+{
+  do
+    {
+      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+      if (oldval != 0)
+       lll_futex_wait (futex, 2, LLL_PRIVATE);
+    }
+  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
+/* These functions don't get included in libc.so  */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+  do
+    {
+      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+      if (oldval != 0)
+       lll_futex_wait (futex, 2, private);
+    }
+  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
 int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
 {
   struct timespec rt;
 
@@ -55,23 +83,10 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
       if (rt.tv_sec < 0)
        return ETIMEDOUT;
 
-      lll_futex_timed_wait (futex, 2, &rt);
+      // XYZ: Lost the lock to check whether it was private.
+      lll_futex_timed_wait (futex, 2, &rt, private);
     }
-  while (atomic_exchange_acq (futex, 2) != 0);
-
-  return 0;
-}
-
-
-/* These don't get included in libc.so  */
-#ifdef IS_IN_libpthread
-int
-lll_unlock_wake_cb (int *futex)
-{
-  int val = atomic_exchange_rel (futex, 0);
-
-  if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1);
+  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
 
   return 0;
 }
@@ -108,11 +123,11 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
        return ETIMEDOUT;
 
       /* Wait until thread terminates.  */
-      if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT)
+      // XYZ: Lost the lock to check whether it was private.
+      if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
        return ETIMEDOUT;
     }
 
   return 0;
 }
-
 #endif
index 79f3dde..4c7d08c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
    Lesser General Public License for more details.
 
    You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Libr   \ary; if not, write to the Free
+   License along with the GNU C Library; if not, write to the Free
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
@@ -24,6 +24,7 @@
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
 #include <sysdep.h>
+#include <bits/kernel-features.h>
 
 #define FUTEX_WAIT             0
 #define FUTEX_WAKE             1
 #define FUTEX_CMP_REQUEUE      4
 #define FUTEX_WAKE_OP          5
 #define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
+             & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif              
+#endif
 
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
 
-#define lll_futex_wait(futexp, val) \
-  ({                                                                         \
-    INTERNAL_SYSCALL_DECL (__err);                                           \
-    long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), 0);                \
-    __ret;                                                                   \
-  })
+#define lll_futex_wait(futexp, val, private) \
+  lll_futex_timed_wait(futexp, val, NULL, private)
 
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), (timespec));       \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAIT, private),       \
+                             (val), (timespec));                             \
     __ret;                                                                   \
   })
 
-#define lll_futex_wake(futexp, nr) \
+#define lll_futex_wake(futexp, nr, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAKE, (nr), 0);                 \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE, private),       \
+                             (nr), 0);                                       \
     __ret;                                                                   \
   })
 
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
   do                                                                         \
     {                                                                        \
       int *__futexp = &(futexv);                                             \
       atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
-      lll_futex_wake (__futexp, 1);                                          \
+      lll_futex_wake (__futexp, 1, private);                                 \
     }                                                                        \
   while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,                               \
-                             (futexp), FUTEX_CMP_REQUEUE, (nr_wake),         \
-                             (nr_move), (mutex), (val));                     \
-    __ret;                                                                   \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+                             (nr_wake), (nr_move), (mutex), (val));          \
+    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,                               \
-                             (futexp), FUTEX_WAKE_OP, (nr_wake),             \
-                             (nr_wake2), (futexp2),                          \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE_OP, private),    \
+                             (nr_wake), (nr_wake2), (futexp2),               \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);                 \
-    __ret;                                                                   \
+    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
 
-static inline int __attribute__((always_inline))
-__lll_mutex_trylock (int *futex)
-{
-  int flag = 1, old;
-#ifdef __thumb__
-  old = atomic_exchange_acq (futex, flag);
-  if (old < 1)
-    flag = 0;
-  else if (old > 1)
-    flag = atomic_exchange_acq (futex, old);
-#else
-  __asm__ __volatile__ (
-    "\tswp     %[old], %[flag], [%[futex]]     @ try to take the lock\n"
-    "\tcmp     %[old], #1                      @ check old lock value\n"
-    "\tmovlo   %[flag], #0                     @ if we got it, return 0\n"
-    "\tswphi   %[flag], %[old], [%[futex]]     @ if it was contested,\n"
-    "                                          @ restore the contested flag,\n"
-    "                                          @ and check whether that won."
-    : [futex] "+&r" (futex), [flag] "+&r" (flag), [old] "=&r" (old)
-    : : "memory" );
-#endif
+#define lll_trylock(lock)      \
+  atomic_compare_and_exchange_val_acq(&(lock), 1, 0)
+
+#define lll_cond_trylock(lock) \
+  atomic_compare_and_exchange_val_acq(&(lock), 2, 0)
+
+#define __lll_robust_trylock(futex, id) \
+  (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
+#define lll_robust_trylock(lock, id) \
+  __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define __lll_lock(futex, private)                                           \
+  ((void) ({                                                                 \
+    int *__futex = (futex);                                                  \
+    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex,       \
+                                                               1, 0), 0))    \
+      {                                                                              \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+         __lll_lock_wait_private (__futex);                                  \
+       else                                                                  \
+         __lll_lock_wait (__futex, private);                                 \
+      }                                                                              \
+  }))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+#define __lll_robust_lock(futex, id, private)                                \
+  ({                                                                         \
+    int *__futex = (futex);                                                  \
+    int __val = 0;                                                           \
+                                                                             \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_lock_wait (__futex, private);                     \
+    __val;                                                                   \
+  })
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
+
+
+#define __lll_cond_lock(futex, private)                                              \
+  ((void) ({                                                                 \
+    int *__futex = (futex);                                                  \
+    if (__builtin_expect (atomic_exchange_acq (__futex, 2), 0))                      \
+      __lll_lock_wait (__futex, private);                                    \
+  }))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
-  return flag;
-}
-#define lll_mutex_trylock(lock)        __lll_mutex_trylock (&(lock))
-
-
-static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock (int *futex)
-{
-  int flag = 2, old;
-#ifdef __thumb__
-  old = atomic_exchange_acq (futex, flag);
-  if (old < 1)
-    flag = 0;
-  else if (old > 1)
-    flag = atomic_exchange_acq (futex, old);
-#else
-  __asm__ __volatile__ (
-    "\tswp     %[old], %[flag], [%[futex]]     @ try to take the lock\n"
-    "\tcmp     %[old], #1                      @ check old lock value\n"
-    "\tmovlo   %[flag], #0                     @ if we got it, return 0\n"
-    "\tswphi   %[flag], %[old], [%[futex]]     @ if it was contested,\n"
-    "                                          @ restore the contested flag,\n"
-    "                                          @ and check whether that won."
-    : [futex] "+&r" (futex), [flag] "+&r" (flag), [old] "=&r" (old)
-    : : "memory" );
-#endif
 
-  return flag;
-}
-#define lll_mutex_cond_trylock(lock)   __lll_mutex_cond_trylock (&(lock))
-
-
-static inline int __attribute__((always_inline))
-__lll_robust_mutex_trylock(int *futex, int id)
-{
-  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
-}
-#define lll_robust_mutex_trylock(lock, id) \
-  __lll_robust_mutex_trylock (&(lock), id)
-
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
-
-static inline void __attribute__((always_inline))
-__lll_mutex_lock (int *futex)
-{
-  int val = atomic_exchange_acq (futex, 1);
-
-  if (__builtin_expect (val != 0, 0))
-    {
-      while (atomic_exchange_acq (futex, 2) != 0)
-       lll_futex_wait (futex, 2);
-    }
-}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
-
-
-static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
-{
-  int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_lock_wait (futex);
-  return result;
-}
-#define lll_robust_mutex_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), id)
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
-{
-  int val = atomic_exchange_acq (futex, 2);
-
-  if (__builtin_expect (val != 0, 0))
-    {
-      while (atomic_exchange_acq (futex, 2) != 0)
-       lll_futex_wait (futex, 2);
-    }
-}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
-
-
-#define lll_robust_mutex_cond_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
-
-
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-       attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
-       attribute_hidden;
-
-static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
-{
-  int result = 0;
-  int val = atomic_exchange_acq (futex, 1);
-
-  if (__builtin_expect (val != 0, 0))
-    result = __lll_timedlock_wait (futex, abstime);
-  return result;
-}
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
-
-
-static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
-                             int id)
-{
-  int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_timedlock_wait (futex, abstime);
-  return result;
-}
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
-  __lll_robust_mutex_timedlock (&(futex), abstime, id)
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
-{
-  int val = atomic_exchange_rel (futex, 0);
-  if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
-
-static inline void __attribute__ ((always_inline))
-__lll_robust_mutex_unlock (int *futex, int mask)
-{
-  int val = atomic_exchange_rel (futex, 0);
-  if (__builtin_expect (val & mask, 0))
-    lll_futex_wake (futex, 1);
-}
-#define lll_robust_mutex_unlock(futex) \
-  __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
-  (void) atomic_exchange_rel (futex, 0);
-  lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
-
-
-#define lll_mutex_islocked(futex) \
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+                                int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+                                       int private) attribute_hidden;
+
+#define __lll_timedlock(futex, abstime, private)                             \
+  ({                                                                         \
+     int *__futex = (futex);                                                 \
+     int __val = 0;                                                          \
+                                                                             \
+     if (__builtin_expect (atomic_exchange_acq (__futex, 1), 0))             \
+       __val = __lll_timedlock_wait (__futex, abstime, private);             \
+     __val;                                                                  \
+  })
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
+
+
+#define __lll_robust_timedlock(futex, abstime, id, private)                  \
+  ({                                                                         \
+    int *__futex = (futex);                                                  \
+    int __val = 0;                                                           \
+                                                                             \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_timedlock_wait (__futex, abstime, private);       \
+    __val;                                                                   \
+  })
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
+
+
+#define __lll_unlock(futex, private) \
+  (void)                                                       \
+    ({ int *__futex = (futex);                                 \
+       int __oldval = atomic_exchange_rel (__futex, 0);                \
+       if (__builtin_expect (__oldval > 1, 0))                 \
+        lll_futex_wake (__futex, 1, private);                  \
+    })
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
+
+
+#define __lll_robust_unlock(futex, private) \
+  (void)                                                       \
+    ({ int *__futex = (futex);                                 \
+       int __oldval = atomic_exchange_rel (__futex, 0);                \
+       if (__builtin_expect (__oldval & FUTEX_WAITERS, 0))     \
+        lll_futex_wake (__futex, 1, private);                  \
+    })
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
+
+
+#define lll_islocked(futex) \
   (futex != 0)
 
 
 /* Our internal lock implementation is identical to the binary-compatible
    mutex implementation. */
 
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER           (0)
 #define LLL_LOCK_INITIALIZER_LOCKED    (1)
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
 /* The states of a lock are:
     0  -  untaken
     1  -  taken by one user
    >1  -  taken by more users */
 
-#define lll_trylock(lock)      lll_mutex_trylock (lock)
-#define lll_lock(lock)         lll_mutex_lock (lock)
-#define lll_unlock(lock)       lll_mutex_unlock (lock)
-#define lll_islocked(lock)     lll_mutex_islocked (lock)
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -300,7 +265,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
   do {                                 \
     __typeof (tid) __tid;              \
     while ((__tid = (tid)) != 0)       \
-      lll_futex_wait (&(tid), __tid);  \
+      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -314,26 +279,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;                                             \
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif /* lowlevellock.h */
index c892581..d81ecd4 100644 (file)
@@ -27,7 +27,7 @@ clear_once_control (void *arg)
   pthread_once_t *once_control = (pthread_once_t *) arg;
 
   *once_control = 0;
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 }
 
 int
@@ -66,7 +66,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
        break;
 
       /* Same generation, some other thread was faster. Wait.  */
-      lll_futex_wait (once_control, oldval);
+      lll_futex_wait (once_control, oldval, LLL_PRIVATE);
     }
 
   /* This thread is the first here.  Do the initialization.
@@ -82,7 +82,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
   *once_control = __fork_generation | 2;
 
   /* Wake up all other threads.  */
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 
   return 0;
 }
index 350d9af..95d5328 100644 (file)
 
 #if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
 
-/* NOTE: We do mark syscalls with unwind annotations, for the benefit of
-   cancellation; but they're really only accurate at the point of the
-   syscall.  The ARM unwind directives are not rich enough without adding
-   a custom personality function.  */
-
 # undef PSEUDO
 # define PSEUDO(name, syscall_name, args)                              \
   .section ".text";                                                    \
     cmn r0, $4096;                                                     \
     PSEUDO_RET;                                                                \
   .Lpseudo_cancel:                                                     \
-    .fnstart;                                                          \
     DOCARGS_##args;    /* save syscall args etc. around CENABLE.  */   \
     CENABLE;                                                           \
     mov ip, r0;                /* put mask in safe place.  */                  \
     UNDOCARGS_##args;  /* restore syscall args.  */                    \
-    ldr r7, =SYS_ify (syscall_name);                                   \
-    swi 0x0;           /* do the call.  */                             \
-    .fnend;            /* Past here we can't easily unwind.  */        \
-    mov r7, r0;                /* save syscall return value.  */               \
+    swi SYS_ify (syscall_name);        /* do the call.  */                     \
+    str r0, [sp, $-4]!; /* save syscall return value.  */              \
     mov r0, ip;                /* get mask back.  */                           \
     CDISABLE;                                                          \
-    mov r0, r7;                /* retrieve return value.  */                   \
-    RESTORE_LR_##args;                                                 \
+    ldmfd sp!, {r0, lr}; /* retrieve return value and address.  */     \
     UNDOARGS_##args;                                                   \
     cmn r0, $4096;
 
-/* DOARGS pushes four bytes on the stack for five arguments, eight bytes for
-   six arguments, and nothing for fewer.  In order to preserve doubleword
-   alignment, sometimes we must save an extra register.  */
-
-# define RESTART_UNWIND .fnend; .fnstart; .save {r7, lr}
-
-# define DOCARGS_0     stmfd sp!, {r7, lr}; .save {r7, lr}
+# define DOCARGS_0     str lr, [sp, #-4]!;
 # define UNDOCARGS_0
-# define RESTORE_LR_0  ldmfd sp!, {r7, lr};
 
-# define DOCARGS_1     stmfd sp!, {r0, r1, r7, lr}; .save {r7, lr}; .pad #8
-# define UNDOCARGS_1   ldr r0, [sp], #8; RESTART_UNWIND
-# define RESTORE_LR_1  RESTORE_LR_0
+# define DOCARGS_1     stmfd sp!, {r0, lr};
+# define UNDOCARGS_1   ldr r0, [sp], #4;
 
-# define DOCARGS_2     stmfd sp!, {r0, r1, r7, lr}; .save {r7, lr}; .pad #8
-# define UNDOCARGS_2   ldmfd sp!, {r0, r1}; RESTART_UNWIND
-# define RESTORE_LR_2  RESTORE_LR_0
+# define DOCARGS_2     stmfd sp!, {r0, r1, lr};
+# define UNDOCARGS_2   ldmfd sp!, {r0, r1};
 
-# define DOCARGS_3     stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #16
-# define UNDOCARGS_3   ldmfd sp!, {r0, r1, r2, r3}; RESTART_UNWIND
-# define RESTORE_LR_3  RESTORE_LR_0
+# define DOCARGS_3     stmfd sp!, {r0, r1, r2, lr};
+# define UNDOCARGS_3   ldmfd sp!, {r0, r1, r2};
 
-# define DOCARGS_4     stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #16
-# define UNDOCARGS_4   ldmfd sp!, {r0, r1, r2, r3}; RESTART_UNWIND
-# define RESTORE_LR_4  RESTORE_LR_0
+# define DOCARGS_4     stmfd sp!, {r0, r1, r2, r3, lr};
+# define UNDOCARGS_4   ldmfd sp!, {r0, r1, r2, r3};
 
-# define DOCARGS_5     .save {r4}; stmfd sp!, {r0, r1, r2, r3, r4, r7, lr}; .save {r7, lr}; .pad #20
-# define UNDOCARGS_5   ldmfd sp!, {r0, r1, r2, r3}; .fnend; .fnstart; .save {r4}; .save {r7, lr}; .pad #4
-# define RESTORE_LR_5  ldmfd sp!, {r4, r7, lr}
+# define DOCARGS_5     DOCARGS_4
+# define UNDOCARGS_5   UNDOCARGS_4
 
-# define DOCARGS_6     .save {r4, r5}; stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #20
-# define UNDOCARGS_6   ldmfd sp!, {r0, r1, r2, r3}; .fnend; .fnstart; .save {r4, r5}; .save {r7, lr}
-# define RESTORE_LR_6  RESTORE_LR_0
+# define DOCARGS_6     DOCARGS_5
+# define UNDOCARGS_6   UNDOCARGS_5
 
 # ifdef IS_IN_libpthread
 #  define CENABLE      bl PLTJMP(__pthread_enable_asynccancel)
@@ -151,3 +129,9 @@ extern int __local_multiple_threads attribute_hidden;
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index 2062028..e19facf 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>.
 
    Boston, MA 02111-1307, USA.  */
 
 #include <dlfcn.h>
-#include <string.h>
+#include <stdio.h>
 #include <unwind.h>
-#include <unistd.h>
 #include <pthreadP.h>
 
-#define __libc_dlopen(x)       dlopen(x, (RTLD_LOCAL | RTLD_LAZY))
-#define __libc_dlsym           dlsym
-
+static void *libgcc_s_handle;
 static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
 static _Unwind_Reason_Code (*libgcc_s_personality)
-  (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *);
+  (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+   struct _Unwind_Context *);
 static _Unwind_Reason_Code (*libgcc_s_forcedunwind)
   (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *);
 static _Unwind_Word (*libgcc_s_getcfa) (struct _Unwind_Context *);
+static void (*libgcc_s_sjlj_register) (struct SjLj_Function_Context *);
+static void (*libgcc_s_sjlj_unregister) (struct SjLj_Function_Context *);
 
 void
+__attribute_noinline__
 pthread_cancel_init (void)
 {
   void *resume, *personality, *forcedunwind, *getcfa;
   void *handle;
+  void *sjlj_register, *sjlj_unregister;
 
-  if (__builtin_expect (libgcc_s_getcfa != NULL, 1))
-    return;
+  if (__builtin_expect (libgcc_s_handle != NULL, 1))
+    {
+      /* Force gcc to reload all values.  */
+      asm volatile ("" ::: "memory");
+      return;
+    }
 
   handle = __libc_dlopen ("libgcc_s.so.1");
 
   if (handle == NULL
-      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
-      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL
-      || (forcedunwind = __libc_dlsym (handle, "_Unwind_ForcedUnwind"))
+      || (sjlj_register = __libc_dlsym (handle, "_Unwind_SjLj_Register")) == NULL
+      || (sjlj_unregister = __libc_dlsym (handle, "_Unwind_SjLj_Unregister")) == NULL
+      || (resume = __libc_dlsym (handle, "_Unwind_SjLj_Resume")) == NULL
+      || (personality = __libc_dlsym (handle, "__gcc_personality_sj0")) == NULL
+      || (forcedunwind = __libc_dlsym (handle, "_Unwind_SjLj_ForcedUnwind"))
         == NULL
       || (getcfa = __libc_dlsym (handle, "_Unwind_GetCFA")) == NULL
-#ifdef ARCH_CANCEL_INIT
-      || ARCH_CANCEL_INIT (handle)
-#endif
       )
-    {
-# define STR_N_LEN(str) str, strlen (str)
-      INTERNAL_SYSCALL_DECL (err);
-      INTERNAL_SYSCALL (write, err, 3, STDERR_FILENO,
-                       STR_N_LEN ("libgcc_s.so.1 must be installed for pthread_cancel to work\n"));
-      abort ();
-    }
+    __libc_fatal ("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
 
   libgcc_s_resume = resume;
   libgcc_s_personality = personality;
   libgcc_s_forcedunwind = forcedunwind;
+  libgcc_s_sjlj_register = sjlj_register;
+  libgcc_s_sjlj_unregister = sjlj_unregister;
   libgcc_s_getcfa = getcfa;
+  /* Make sure libgcc_s_getcfa is written last.  Otherwise,
+     pthread_cancel_init might return early even when the pointer the
+     caller is interested in is not initialized yet.  */
+  atomic_write_barrier ();
+  libgcc_s_handle = handle;
+}
+
+void
+__libc_freeres_fn_section
+__unwind_freeres (void)
+{
+  void *handle = libgcc_s_handle;
+  if (handle != NULL)
+    {
+      libgcc_s_handle = NULL;
+      __libc_dlclose (handle);
+    }
 }
 
-/* It's vitally important that _Unwind_Resume not have a stack frame; the
-   ARM unwinder relies on register state at entrance.  So we write this in
-   assembly.  */
-
-asm (
-#ifdef __thumb__
-"      .code 32"
-#endif
-"      .globl  _Unwind_Resume\n"
-"      .type   _Unwind_Resume, %function\n"
-"_Unwind_Resume:\n"
-"      stmfd   sp!, {r4, r5, r6, lr}\n"
-"      ldr     r4, 1f\n"
-"      ldr     r5, 2f\n"
-"3:    add     r4, pc, r4\n"
-"      ldr     r3, [r4, r5]\n"
-"      mov     r6, r0\n"
-"      cmp     r3, #0\n"
-"      beq     4f\n"
-"5:    mov     r0, r6\n"
-"      ldmfd   sp!, {r4, r5, r6, lr}\n"
-"      bx      r3\n"
-"4:    bl      pthread_cancel_init\n"
-"      ldr     r3, [r4, r5]\n"
-"      b       5b\n"
-"1:    .word   _GLOBAL_OFFSET_TABLE_ - 3b - 8\n"
-"2:    .word   libgcc_s_resume(GOTOFF)\n"
-"      .size   _Unwind_Resume, .-_Unwind_Resume\n"
-#ifdef __thumb__
-"      .code 16"
-#endif
-);
+void
+_Unwind_Resume (struct _Unwind_Exception *exc)
+{
+  if (__builtin_expect (libgcc_s_resume == NULL, 0))
+    pthread_cancel_init ();
+
+  libgcc_s_resume (exc);
+}
 
 _Unwind_Reason_Code
-__gcc_personality_v0 (_Unwind_State state,
-                     struct _Unwind_Exception *ue_header,
-                     struct _Unwind_Context *context)
+__gcc_personality_v0 (int version, _Unwind_Action actions,
+                     _Unwind_Exception_Class exception_class,
+                      struct _Unwind_Exception *ue_header,
+                      struct _Unwind_Context *context)
 {
   if (__builtin_expect (libgcc_s_personality == NULL, 0))
     pthread_cancel_init ();
-  return libgcc_s_personality (state, ue_header, context);
+
+  return libgcc_s_personality (version, actions, exception_class,
+                              ue_header, context);
 }
 
 _Unwind_Reason_Code
@@ -117,6 +114,7 @@ _Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
 {
   if (__builtin_expect (libgcc_s_forcedunwind == NULL, 0))
     pthread_cancel_init ();
+
   return libgcc_s_forcedunwind (exc, stop, stop_argument);
 }
 
@@ -125,5 +123,24 @@ _Unwind_GetCFA (struct _Unwind_Context *context)
 {
   if (__builtin_expect (libgcc_s_getcfa == NULL, 0))
     pthread_cancel_init ();
+
   return libgcc_s_getcfa (context);
 }
+
+void
+_Unwind_SjLj_Register (struct SjLj_Function_Context *fc)
+{
+  if (__builtin_expect (libgcc_s_sjlj_register == NULL, 0))
+    pthread_cancel_init ();
+
+  libgcc_s_sjlj_register (fc);
+}
+
+void
+_Unwind_SjLj_Unregister (struct SjLj_Function_Context *fc)
+{
+  if (__builtin_expect (libgcc_s_sjlj_unregister == NULL, 0))
+    pthread_cancel_init ();
+
+  libgcc_s_sjlj_unregister (fc);
+}
index 99b1574..8dcfd34 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>.
 
 
 #include <dlfcn.h>
 #include <stdio.h>
-#include <stdlib.h>
 #include <unwind.h>
 
-#define __libc_dlopen(x)       dlopen(x, (RTLD_LOCAL | RTLD_LAZY))
-#define __libc_dlsym           dlsym
-
 static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
 static _Unwind_Reason_Code (*libgcc_s_personality)
-  (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *);
-
-static void init (void) __attribute_used__;
+  (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+   struct _Unwind_Context *);
+static void (*libgcc_s_sjlj_register) (struct SjLj_Function_Context *);
+static void (*libgcc_s_sjlj_unregister) (struct SjLj_Function_Context *);
 
 static void
 init (void)
 {
   void *resume, *personality;
   void *handle;
+  void *sjlj_register, *sjlj_unregister;
 
   handle = __libc_dlopen ("libgcc_s.so.1");
 
   if (handle == NULL
-      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
-      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL) {
-    fprintf(stderr, "libgcc_s.so.1 must be installed for pthread_cancel to work\n");
-    abort ();
-  }
+      || (sjlj_register = __libc_dlsym (handle, "_Unwind_SjLj_Register")) == NULL
+      || (sjlj_unregister = __libc_dlsym (handle, "_Unwind_SjLj_Unregister")) == NULL
+      || (resume = __libc_dlsym (handle, "_Unwind_SjLj_Resume")) == NULL
+      || (personality = __libc_dlsym (handle, "__gcc_personality_sj0")) == NULL)
+    __libc_fatal ("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
 
   libgcc_s_resume = resume;
   libgcc_s_personality = personality;
+  libgcc_s_sjlj_register = sjlj_register;
+  libgcc_s_sjlj_unregister = sjlj_unregister;
 }
 
-/* It's vitally important that _Unwind_Resume not have a stack frame; the
-   ARM unwinder relies on register state at entrance.  So we write this in
-   assembly.  */
-
-__asm__ (
-#ifdef __thumb__
-"       .code 32\n"
-#endif
-"      .globl  _Unwind_Resume\n"
-"      .type   _Unwind_Resume, %function\n"
-"_Unwind_Resume:\n"
-"      stmfd   sp!, {r4, r5, r6, lr}\n"
-"      ldr     r4, 1f\n"
-"      ldr     r5, 2f\n"
-"3:    add     r4, pc, r4\n"
-"      ldr     r3, [r4, r5]\n"
-"      mov     r6, r0\n"
-"      cmp     r3, #0\n"
-"      beq     4f\n"
-"5:    mov     r0, r6\n"
-"      ldmfd   sp!, {r4, r5, r6, lr}\n"
-"      bx      r3\n"
-"4:    bl      init\n"
-"      ldr     r3, [r4, r5]\n"
-"      b       5b\n"
-"1:    .word   _GLOBAL_OFFSET_TABLE_ - 3b - 8\n"
-"2:    .word   libgcc_s_resume(GOTOFF)\n"
-"      .size   _Unwind_Resume, .-_Unwind_Resume\n"
-#ifdef __thumb__
-"       .code 16\n"
-#endif
-
-);
+void
+_Unwind_Resume (struct _Unwind_Exception *exc)
+{
+  if (__builtin_expect (libgcc_s_resume == NULL, 0))
+    init ();
+  libgcc_s_resume (exc);
+}
 
 _Unwind_Reason_Code
-__gcc_personality_v0 (_Unwind_State state,
-                     struct _Unwind_Exception *ue_header,
-                     struct _Unwind_Context *context)
+__gcc_personality_v0 (int version, _Unwind_Action actions,
+                     _Unwind_Exception_Class exception_class,
+                      struct _Unwind_Exception *ue_header,
+                      struct _Unwind_Context *context)
 {
   if (__builtin_expect (libgcc_s_personality == NULL, 0))
     init ();
-  return libgcc_s_personality (state, ue_header, context);
+  return libgcc_s_personality (version, actions, exception_class,
+                              ue_header, context);
+}
+
+void
+_Unwind_SjLj_Register (struct SjLj_Function_Context *fc)
+{
+  if (__builtin_expect (libgcc_s_sjlj_register == NULL, 0))
+    init ();
+  libgcc_s_sjlj_register (fc);
+}
+
+void
+_Unwind_SjLj_Unregister (struct SjLj_Function_Context *fc)
+{
+  if (__builtin_expect (libgcc_s_sjlj_unregister == NULL, 0))
+    init ();
+  libgcc_s_sjlj_unregister (fc);
 }
index d625fb2..eeb9cf8 100644 (file)
@@ -1,5 +1,5 @@
 /* Header file for the ARM EABI unwinder
-   Copyright (C) 2003, 2004, 2005  Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2009  Free Software Foundation, Inc.
    Contributed by Paul Brook
 
    This file is free software; you can redistribute it and/or modify it
@@ -267,6 +267,11 @@ extern "C" {
 #define _Unwind_SetIP(context, val) \
   _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
 
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)
+     (struct _Unwind_Context *, void *);
+
+extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *);
+
 #ifdef __cplusplus
 }   /* extern "C" */
 #endif
index b639ba4..8f0df4f 100644 (file)
@@ -1,5 +1,5 @@
 /* Minimum guaranteed maximum values for system limits.  Linux version.
-   Copyright (C) 1993-1998,2000,2002,2003,2004 Free Software Foundation, Inc.
+   Copyright (C) 1993-1998,2000,2002-2004,2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -31,6 +31,9 @@
 #ifndef OPEN_MAX
 # define __undef_OPEN_MAX
 #endif
+#ifndef ARG_MAX
+# define __undef_ARG_MAX
+#endif
 
 /* The kernel sources contain a file with all the needed information.  */
 #include <linux/limits.h>
 # undef OPEN_MAX
 # undef __undef_OPEN_MAX
 #endif
+/* Have to remove ARG_MAX?  */
+#ifdef __undef_ARG_MAX
+# undef ARG_MAX
+# undef __undef_ARG_MAX
+#endif
 
 /* The number of data keys per process.  */
 #define _POSIX_THREAD_KEYS_MAX 128
@@ -87,3 +95,6 @@
 
 /* Maximum message queue priority level.  */
 #define MQ_PRIO_MAX            32768
+
+/* Maximum value the semaphore can have.  */
+#define SEM_VALUE_MAX   (2147483647)
index 92c2d32..2550355 100644 (file)
@@ -1,5 +1,5 @@
 /* Define POSIX options for Linux.
-   Copyright (C) 1996-2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 1996-2004, 2006, 2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -17,8 +17,8 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
-#ifndef        _POSIX_OPT_H
-#define        _POSIX_OPT_H    1
+#ifndef        _BITS_POSIX_OPT_H
+#define        _BITS_POSIX_OPT_H       1
 
 /* Job control is supported.  */
 #define        _POSIX_JOB_CONTROL      1
 #define        _POSIX_SAVED_IDS        1
 
 /* Priority scheduling is supported.  */
-#define        _POSIX_PRIORITY_SCHEDULING      200112L
+#define        _POSIX_PRIORITY_SCHEDULING      200809L
 
 /* Synchronizing file data is supported.  */
-#define        _POSIX_SYNCHRONIZED_IO  200112L
+#define        _POSIX_SYNCHRONIZED_IO  200809L
 
 /* The fsync function is present.  */
-#define        _POSIX_FSYNC    200112L
+#define        _POSIX_FSYNC    200809L
 
 /* Mapping of files to memory is supported.  */
-#define        _POSIX_MAPPED_FILES     200112L
+#define        _POSIX_MAPPED_FILES     200809L
 
 /* Locking of all memory is supported.  */
-#define        _POSIX_MEMLOCK  200112L
+#define        _POSIX_MEMLOCK  200809L
 
 /* Locking of ranges of memory is supported.  */
-#define        _POSIX_MEMLOCK_RANGE    200112L
+#define        _POSIX_MEMLOCK_RANGE    200809L
 
 /* Setting of memory protections is supported.  */
-#define        _POSIX_MEMORY_PROTECTION        200112L
+#define        _POSIX_MEMORY_PROTECTION        200809L
 
-/* Only root can change owner of file.  */
-#define        _POSIX_CHOWN_RESTRICTED 1
+/* Some filesystems allow all users to change file ownership.  */
+#define        _POSIX_CHOWN_RESTRICTED 0
 
 /* `c_cc' member of 'struct termios' structure can be disabled by
    using the value _POSIX_VDISABLE.  */
 /* X/Open realtime support is available.  */
 #define _XOPEN_REALTIME        1
 
+/* X/Open thread realtime support is available.  */
+#define _XOPEN_REALTIME_THREADS        1
+
 /* XPG4.2 shared memory is supported.  */
 #define        _XOPEN_SHM      1
 
 /* Tell we have POSIX threads.  */
-#define _POSIX_THREADS 200112L
+#define _POSIX_THREADS 200809L
 
 /* We have the reentrant functions described in POSIX.  */
 #define _POSIX_REENTRANT_FUNCTIONS      1
-#define _POSIX_THREAD_SAFE_FUNCTIONS   200112L
+#define _POSIX_THREAD_SAFE_FUNCTIONS   200809L
 
 /* We provide priority scheduling for threads.  */
-#define _POSIX_THREAD_PRIORITY_SCHEDULING      200112L
+#define _POSIX_THREAD_PRIORITY_SCHEDULING      200809L
 
 /* We support user-defined stack sizes.  */
-#define _POSIX_THREAD_ATTR_STACKSIZE   200112L
+#define _POSIX_THREAD_ATTR_STACKSIZE   200809L
 
 /* We support user-defined stacks.  */
-#define _POSIX_THREAD_ATTR_STACKADDR   200112L
+#define _POSIX_THREAD_ATTR_STACKADDR   200809L
+
+/* We support priority inheritance.  */
+#define _POSIX_THREAD_PRIO_INHERIT     200809L
+
+/* We support priority protection, though only for non-robust
+   mutexes.  */
+#define _POSIX_THREAD_PRIO_PROTECT     200809L
+
+#ifdef __USE_XOPEN2K8
+/* We support priority inheritance for robust mutexes.  */
+# define _POSIX_THREAD_ROBUST_PRIO_INHERIT     200809L
+
+/* We do not support priority protection for robust mutexes.  */
+# define _POSIX_THREAD_ROBUST_PRIO_PROTECT     -1
+#endif
 
 /* We support POSIX.1b semaphores.  */
-#define _POSIX_SEMAPHORES      200112L
+#define _POSIX_SEMAPHORES      200809L
 
 /* Real-time signals are supported.  */
-#define _POSIX_REALTIME_SIGNALS        200112L
+#define _POSIX_REALTIME_SIGNALS        200809L
 
 /* We support asynchronous I/O.  */
-#define _POSIX_ASYNCHRONOUS_IO 200112L
+#define _POSIX_ASYNCHRONOUS_IO 200809L
 #define _POSIX_ASYNC_IO                1
 /* Alternative name for Unix98.  */
 #define _LFS_ASYNCHRONOUS_IO   1
 /* Support for prioritization is also available.  */
-#define _POSIX_PRIORITIZED_IO  200112L
+#define _POSIX_PRIORITIZED_IO  200809L
 
 /* The LFS support in asynchronous I/O is also available.  */
 #define _LFS64_ASYNCHRONOUS_IO 1
 #define _LFS64_STDIO           1
 
 /* POSIX shared memory objects are implemented.  */
-#define _POSIX_SHARED_MEMORY_OBJECTS   200112L
+#define _POSIX_SHARED_MEMORY_OBJECTS   200809L
 
 /* CPU-time clocks support needs to be checked at runtime.  */
 #define _POSIX_CPUTIME 0
 #define _POSIX_REGEXP  1
 
 /* Reader/Writer locks are available.  */
-#define _POSIX_READER_WRITER_LOCKS     200112L
+#define _POSIX_READER_WRITER_LOCKS     200809L
 
 /* We have a POSIX shell.  */
 #define _POSIX_SHELL   1
 
 /* We support the Timeouts option.  */
-#define _POSIX_TIMEOUTS        200112L
+#define _POSIX_TIMEOUTS        200809L
 
 /* We support spinlocks.  */
-#define _POSIX_SPIN_LOCKS      200112L
+#define _POSIX_SPIN_LOCKS      200809L
 
 /* The `spawn' function family is supported.  */
-#define _POSIX_SPAWN   200112L
+#define _POSIX_SPAWN   200809L
 
 /* We have POSIX timers.  */
-#define _POSIX_TIMERS  200112L
+#define _POSIX_TIMERS  200809L
 
 /* The barrier functions are available.  */
-#define _POSIX_BARRIERS        200112L
+#define _POSIX_BARRIERS        200809L
 
 /* POSIX message queues are available.  */
-#define        _POSIX_MESSAGE_PASSING  200112L
+#define        _POSIX_MESSAGE_PASSING  200809L
 
 /* Thread process-shared synchronization is supported.  */
-#define _POSIX_THREAD_PROCESS_SHARED   200112L
+#define _POSIX_THREAD_PROCESS_SHARED   200809L
 
 /* The monotonic clock might be available.  */
 #define _POSIX_MONOTONIC_CLOCK 0
 
 /* The clock selection interfaces are available.  */
-#define _POSIX_CLOCK_SELECTION 200112L
+#define _POSIX_CLOCK_SELECTION 200809L
 
 /* Advisory information interfaces are available.  */
-#define _POSIX_ADVISORY_INFO   200112L
+#define _POSIX_ADVISORY_INFO   200809L
 
 /* IPv6 support is available.  */
-#define _POSIX_IPV6    200112L
+#define _POSIX_IPV6    200809L
 
 /* Raw socket support is available.  */
-#define _POSIX_RAW_SOCKETS     200112L
+#define _POSIX_RAW_SOCKETS     200809L
 
 /* We have at least one terminal.  */
-#define _POSIX2_CHAR_TERM      200112L
+#define _POSIX2_CHAR_TERM      200809L
 
 /* Neither process nor thread sporadic server interfaces is available.  */
 #define _POSIX_SPORADIC_SERVER -1
 /* Typed memory objects are not available.  */
 #define _POSIX_TYPED_MEMORY_OBJECTS    -1
 
-/* No support for priority inheritance or protection so far.  */
-#define _POSIX_THREAD_PRIO_INHERIT     -1
-#define _POSIX_THREAD_PRIO_PROTECT     -1
-
-#endif /* posix_opt.h */
+#endif /* bits/posix_opt.h */
index a84b5c2..2d4cae2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -58,8 +58,9 @@ fresetlockfiles (void)
 #endif
 }
 
-extern __typeof(fork) __libc_fork;
-pid_t __libc_fork (void)
+
+pid_t
+__libc_fork (void)
 {
   pid_t pid;
   struct used_handler
@@ -73,6 +74,9 @@ pid_t __libc_fork (void)
   struct fork_handler *runp;
   while ((runp = __fork_handlers) != NULL)
     {
+      /* Make sure we read from the current RUNP pointer.  */
+      atomic_full_barrier ();
+
       unsigned int oldval = runp->refcntr;
 
       if (oldval == 0)
@@ -166,6 +170,8 @@ pid_t __libc_fork (void)
       /* Reset locks in the I/O code.  */
       STDIO_INIT_MUTEX(_stdio_openlist_add_lock);
 
+      /* XXX reset any locks in dynamic loader */
+
       /* Run the handlers registered for the child.  */
       while (allp != NULL)
        {
@@ -173,8 +179,11 @@ pid_t __libc_fork (void)
            allp->handler->child_handler ();
 
          /* Note that we do not have to wake any possible waiter.
-            This is the only thread in the new process.  */
-         --allp->handler->refcntr;
+            This is the only thread in the new process.  The count
+            may have been bumped up by other threads doing a fork.
+            We reset it to 1, to avoid waiting for non-existing
+            thread(s) to release the count.  */
+         allp->handler->refcntr = 1;
 
          /* XXX We could at this point look through the object pool
             and mark all objects not on the __fork_handlers list as
@@ -186,7 +195,7 @@ pid_t __libc_fork (void)
        }
 
       /* Initialize the fork lock.  */
-      __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER;
+      __fork_lock = LLL_LOCK_INITIALIZER;
     }
   else
     {
@@ -206,7 +215,7 @@ pid_t __libc_fork (void)
 
          if (atomic_decrement_and_test (&allp->handler->refcntr)
              && allp->handler->need_signal)
-           lll_futex_wake (allp->handler->refcntr, 1);
+           lll_futex_wake (allp->handler->refcntr, 1, LLL_PRIVATE);
 
          allp = allp->next;
        }
index bcdf621..a00cfab 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,7 +26,7 @@ extern unsigned long int __fork_generation attribute_hidden;
 extern unsigned long int *__fork_generation_pointer attribute_hidden;
 
 /* Lock to protect allocation and deallocation of fork handlers.  */
-extern lll_lock_t __fork_lock attribute_hidden;
+extern int __fork_lock attribute_hidden;
 
 /* Elements of the fork handler lists.  */
 struct fork_handler
@@ -41,7 +41,7 @@ struct fork_handler
 };
 
 /* The single linked list of all currently registered for handlers.  */
-extern struct fork_handler *__fork_handlers;
+extern struct fork_handler *__fork_handlers attribute_hidden;
 
 
 /* Function to call to unregister fork handlers.  */
@@ -54,3 +54,7 @@ extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *dso_handle);
+libc_hidden_proto (__register_atfork)
+
+/* Add a new element to the fork list.  */
+extern void __linkin_atfork (struct fork_handler *newp) attribute_hidden;
index 9a4f51c..96e2bf4 100644 (file)
 #include <sysdep.h>
 
 
-extern __typeof(getpid) __getpid;
 #ifndef NOT_IN_libc
-static __always_inline pid_t really_getpid (pid_t oldval);
+static inline __attribute__((always_inline)) pid_t really_getpid (pid_t oldval);
 
-static __always_inline pid_t really_getpid (pid_t oldval)
+static inline __attribute__((always_inline)) pid_t
+really_getpid (pid_t oldval)
 {
   if (__builtin_expect (oldval == 0, 1))
     {
@@ -46,7 +46,8 @@ static __always_inline pid_t really_getpid (pid_t oldval)
 }
 #endif
 
-pid_t __getpid (void)
+pid_t
+__getpid (void)
 {
 #ifdef NOT_IN_libc
   INTERNAL_SYSCALL_DECL (err);
index 740ee7f..9bb1938 100644 (file)
@@ -15,7 +15,7 @@ libc_a_CSRC = fork.c
 libc_a_SSRC = clone.S vfork.S
 
 libpthread_SSRC += i486/lowlevellock.S i486/pthread_barrier_wait.S i486/pthread_cond_signal.S i486/pthread_cond_broadcast.S \
-                  i486/sem_post.S i486/sem_timedwait.S \
+                  i486/lowlevelrobustlock.S i486/sem_post.S i486/sem_timedwait.S \
                   i486/sem_trywait.S i486/sem_wait.S i486/pthread_rwlock_rdlock.S i486/pthread_rwlock_wrlock.S \
                   i486/pthread_rwlock_timedrdlock.S i486/pthread_rwlock_timedwrlock.S i486/pthread_rwlock_unlock.S
 #i486/pthread_cond_timedwait.S i486/pthread_cond_wait.S
@@ -31,6 +31,7 @@ endif
 
 ASFLAGS-pt-vfork.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
 ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevelrobustlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
 ASFLAGS-pthread_once.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
 ASFLAGS-pthread_spin_unlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
 
index 0ec6e55..9e3e016 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -128,7 +128,10 @@ typedef union
     unsigned int __nr_writers_queued;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
+    unsigned char __shared;
+    unsigned char __pad1;
+    unsigned char __pad2;
     int __writer;
   } __data;
   char __size[__SIZEOF_PTHREAD_RWLOCK_T];
@@ -165,6 +168,6 @@ typedef union
 
 
 /* Extra attributes for the cleanup functions.  */
-#define __cleanup_fct_attribute __attribute ((regparm (1)))
+#define __cleanup_fct_attribute __attribute__ ((__regparm__ (1)))
 
 #endif /* bits/pthreadtypes.h */
index e6c5d84..934493c 100644 (file)
@@ -28,9 +28,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
index b874538..813e529 100644 (file)
 
 #include <sched.h>
 #include <signal.h>
-#include <stdio.h>
 #include <sysdep.h>
 #include <tls.h>
 
+
 #define ARCH_FORK() \
   INLINE_SYSCALL (clone, 5,                                                  \
                  CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0,     \
index 223b111..ce8ad27 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
-/* In libc.so we do not unconditionally use the lock prefix.  Only if
-   the application is using threads.  */
-#ifndef UP
-# define LOCK \
-       cmpl    $0, %gs:MULTIPLE_THREADS_OFFSET;                              \
-       je,pt   0f;                                                           \
-       lock;                                                                 \
-0:
-#endif
-
 #include "lowlevellock.S"
index 955e119..61255a0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
 
        .text
 
-#ifndef LOCK
-# ifdef UP
-#  define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+       movl    $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+       movl    $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+       xorl    $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+       xorl    $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+       xorl    $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+       movl    %gs:PRIVATE_FUTEX, reg
 # else
-#  define LOCK lock
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+       movl    %gs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT, reg
 # endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+       movl    %gs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %gs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %gs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %gs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %gs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAKE, reg
 #endif
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-
-       .globl  __lll_mutex_lock_wait
-       .type   __lll_mutex_lock_wait,@function
-       .hidden __lll_mutex_lock_wait
+       .globl  __lll_lock_wait_private
+       .type   __lll_lock_wait_private,@function
+       .hidden __lll_lock_wait_private
        .align  16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
+       cfi_startproc
        pushl   %edx
+       cfi_adjust_cfa_offset(4)
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%edx, -8)
+       cfi_offset(%ebx, -12)
+       cfi_offset(%esi, -16)
 
        movl    $2, %edx
        movl    %ecx, %ebx
        xorl    %esi, %esi      /* No timeout.  */
-       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
+       LOAD_PRIVATE_FUTEX_WAIT (%ecx)
 
        cmpl    %edx, %eax      /* NB:   %edx == 2 */
        jne 2f
@@ -58,41 +98,162 @@ __lll_mutex_lock_wait:
        xchgl   %eax, (%ebx)    /* NB:   lock is implied */
 
        testl   %eax, %eax
-       jnz,pn  1b
+       jnz     1b
 
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %edx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edx)
        ret
-       .size   __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+       cfi_endproc
+       .size   __lll_lock_wait_private,.-__lll_lock_wait_private
 
 #ifdef NOT_IN_libc
-       .globl  __lll_mutex_timedlock_wait
-       .type   __lll_mutex_timedlock_wait,@function
-       .hidden __lll_mutex_timedlock_wait
+       .globl  __lll_lock_wait
+       .type   __lll_lock_wait,@function
+       .hidden __lll_lock_wait
+       .align  16
+__lll_lock_wait:
+       cfi_startproc
+       pushl   %edx
+       cfi_adjust_cfa_offset(4)
+       pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%edx, -8)
+       cfi_offset(%ebx, -12)
+       cfi_offset(%esi, -16)
+
+       movl    %edx, %ebx
+       movl    $2, %edx
+       xorl    %esi, %esi      /* No timeout.  */
+       LOAD_FUTEX_WAIT (%ecx)
+
+       cmpl    %edx, %eax      /* NB:   %edx == 2 */
+       jne 2f
+
+1:     movl    $SYS_futex, %eax
+       ENTER_KERNEL
+
+2:     movl    %edx, %eax
+       xchgl   %eax, (%ebx)    /* NB:   lock is implied */
+
+       testl   %eax, %eax
+       jnz     1b
+
+       popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
+       popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       popl    %edx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edx)
+       ret
+       cfi_endproc
+       .size   __lll_lock_wait,.-__lll_lock_wait
+
+       /*      %ecx: futex
+               %esi: flags
+               %edx: timeout
+               %eax: futex value
+       */
+       .globl  __lll_timedlock_wait
+       .type   __lll_timedlock_wait,@function
+       .hidden __lll_timedlock_wait
        .align  16
-__lll_mutex_timedlock_wait:
+__lll_timedlock_wait:
+       cfi_startproc
+       pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebp, 0)
+       pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebx, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+       LOAD_PIC_REG (bx)
+       cmpl    $0, __have_futex_clock_realtime@GOTOFF(%ebx)
+#  else
+       cmpl    $0, __have_futex_clock_realtime
+#  endif
+       je      .Lreltmo
+# endif
+
+       movl    %ecx, %ebx
+       movl    %esi, %ecx
+       movl    %edx, %esi
+       movl    $0xffffffff, %ebp
+       LOAD_FUTEX_WAIT_ABS (%ecx)
+
+       movl    $2, %edx
+       cmpl    %edx, %eax
+       jne     2f
+
+1:     movl    $SYS_futex, %eax
+       movl    $2, %edx
+       ENTER_KERNEL
+
+2:     xchgl   %edx, (%ebx)    /* NB:   lock is implied */
+
+       testl   %edx, %edx
+       jz      3f
+
+       cmpl    $-ETIMEDOUT, %eax
+       je      4f
+       cmpl    $-EINVAL, %eax
+       jne     1b
+4:     movl    %eax, %edx
+       negl    %edx
+
+3:     movl    %edx, %eax
+7:     popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
+       ret
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
        /* Check for a valid timeout value.  */
        cmpl    $1000000000, 4(%edx)
        jae     3f
 
-       pushl   %edi
        pushl   %esi
-       pushl   %ebx
-       pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%esi, 0)
+       pushl   %edi
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%edi, 0)
 
        /* Stack frame for the timespec and timeval structs.  */
        subl    $8, %esp
+       cfi_adjust_cfa_offset(8)
 
        movl    %ecx, %ebp
        movl    %edx, %edi
 
+       movl    $2, %edx
+       xchgl   %edx, (%ebp)
+
+       test    %edx, %edx
+       je      6f
+
 1:
        /* Get current time.  */
        movl    %esp, %ebx
        xorl    %ecx, %ecx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
 
        /* Compute relative timeout.  */
@@ -107,116 +268,128 @@ __lll_mutex_timedlock_wait:
        addl    $1000000000, %edx
        subl    $1, %ecx
 4:     testl   %ecx, %ecx
-       js      5f              /* Time is already up.  */
+       js      2f              /* Time is already up.  */
 
        /* Store relative timeout.  */
        movl    %ecx, (%esp)
        movl    %edx, 4(%esp)
 
+       /* Futex call.  */
        movl    %ebp, %ebx
-
-       movl    $1, %eax
        movl    $2, %edx
-       LOCK
-       cmpxchgl %edx, (%ebx)
-
-       testl   %eax, %eax
-       je      8f
-
-       /* Futex call.  */
        movl    %esp, %esi
-       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
+       movl    16(%esp), %ecx
+       LOAD_FUTEX_WAIT (%ecx)
        movl    $SYS_futex, %eax
        ENTER_KERNEL
-       movl    %eax, %ecx
 
-8:                             /* NB: %edx == 2 */
-       xorl    %eax, %eax
-       LOCK
-       cmpxchgl %edx, (%ebx)
+       /* NB: %edx == 2 */
+       xchgl   %edx, (%ebp)
 
-       jnz     7f
+       testl   %edx, %edx
+       je      6f
+
+       cmpl    $-ETIMEDOUT, %eax
+       jne     1b
+2:     movl    $ETIMEDOUT, %edx
 
 6:     addl    $8, %esp
-       popl    %ebp
-       popl    %ebx
-       popl    %esi
+       cfi_adjust_cfa_offset(-8)
        popl    %edi
-       ret
-
-       /* Check whether the time expired.  */
-7:     cmpl    $-ETIMEDOUT, %ecx
-       je      5f
-
-       /* Make sure the current holder knows we are going to sleep.  */
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
+       popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
+7:     popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
        movl    %edx, %eax
-       xchgl   %eax, (%ebx)
-       testl   %eax, %eax
-       jz      6b
-       jmp     1b
-
-3:     movl    $EINVAL, %eax
        ret
 
-5:     movl    $ETIMEDOUT, %eax
-       jmp     6b
-       .size   __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+3:     movl    $EINVAL, %edx
+       jmp     7b
+# endif
+       cfi_endproc
+       .size   __lll_timedlock_wait,.-__lll_timedlock_wait
 #endif
 
-
-#ifdef NOT_IN_libc
-       .globl  lll_unlock_wake_cb
-       .type   lll_unlock_wake_cb,@function
-       .hidden lll_unlock_wake_cb
+       .globl  __lll_unlock_wake_private
+       .type   __lll_unlock_wake_private,@function
+       .hidden __lll_unlock_wake_private
        .align  16
-lll_unlock_wake_cb:
+__lll_unlock_wake_private:
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
        pushl   %ecx
+       cfi_adjust_cfa_offset(4)
        pushl   %edx
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
+       cfi_offset(%ecx, -12)
+       cfi_offset(%edx, -16)
 
-       movl    20(%esp), %ebx
-       LOCK
-       subl    $1, (%ebx)
-       je      1f
-
-       movl    $FUTEX_WAKE, %ecx
+       movl    %eax, %ebx
+       movl    $0, (%eax)
+       LOAD_PRIVATE_FUTEX_WAKE (%ecx)
        movl    $1, %edx        /* Wake one thread.  */
        movl    $SYS_futex, %eax
-       movl    $0, (%ebx)
        ENTER_KERNEL
 
-1:     popl    %edx
+       popl    %edx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edx)
        popl    %ecx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ecx)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
-       .size   lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+       cfi_endproc
+       .size   __lll_unlock_wake_private,.-__lll_unlock_wake_private
 
-       .globl  __lll_mutex_unlock_wake
-       .type   __lll_mutex_unlock_wake,@function
-       .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+       .globl  __lll_unlock_wake
+       .type   __lll_unlock_wake,@function
+       .hidden __lll_unlock_wake
        .align  16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
        pushl   %ecx
+       cfi_adjust_cfa_offset(4)
        pushl   %edx
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
+       cfi_offset(%ecx, -12)
+       cfi_offset(%edx, -16)
 
        movl    %eax, %ebx
        movl    $0, (%eax)
-       movl    $FUTEX_WAKE, %ecx
+       LOAD_FUTEX_WAKE (%ecx)
        movl    $1, %edx        /* Wake one thread.  */
        movl    $SYS_futex, %eax
        ENTER_KERNEL
 
        popl    %edx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edx)
        popl    %ecx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ecx)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
-       .size   __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+       cfi_endproc
+       .size   __lll_unlock_wake,.-__lll_unlock_wake
 
-
-#ifdef NOT_IN_libc
        .globl  __lll_timedwait_tid
        .type   __lll_timedwait_tid,@function
        .hidden __lll_timedwait_tid
@@ -234,7 +407,7 @@ __lll_timedwait_tid:
        /* Get current time.  */
 2:     movl    %esp, %ebx
        xorl    %ecx, %ecx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
 
        /* Compute relative timeout.  */
@@ -259,6 +432,8 @@ __lll_timedwait_tid:
        jz      4f
 
        movl    %esp, %esi
+       /* XXX The kernel so far uses global futex for the wakeup at
+          all times.  */
        xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
        movl    %ebp, %ebx
        movl    $SYS_futex, %eax
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..5967634
--- /dev/null
@@ -0,0 +1,233 @@
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <bits/kernel-features.h>
+
+       .text
+
+#define FUTEX_WAITERS          0x80000000
+#define FUTEX_OWNER_DIED       0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+       xorl    $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %gs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %gs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT, reg
+# endif
+#endif
+
+       .globl  __lll_robust_lock_wait
+       .type   __lll_robust_lock_wait,@function
+       .hidden __lll_robust_lock_wait
+       .align  16
+__lll_robust_lock_wait:
+       cfi_startproc
+       pushl   %edx
+       cfi_adjust_cfa_offset(4)
+       pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%edx, -8)
+       cfi_offset(%ebx, -12)
+       cfi_offset(%esi, -16)
+
+       movl    %edx, %ebx
+       xorl    %esi, %esi      /* No timeout.  */
+       LOAD_FUTEX_WAIT (%ecx)
+
+4:     movl    %eax, %edx
+       orl     $FUTEX_WAITERS, %edx
+
+       testl   $FUTEX_OWNER_DIED, %eax
+       jnz     3f
+
+       cmpl    %edx, %eax      /* NB:   %edx == 2 */
+       je      1f
+
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jnz     2f
+
+1:     movl    $SYS_futex, %eax
+       ENTER_KERNEL
+
+       movl    (%ebx), %eax
+
+2:     test    %eax, %eax
+       jne     4b
+
+       movl    %gs:TID, %edx
+       orl     $FUTEX_WAITERS, %edx
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jnz     4b
+       /* NB:   %eax == 0 */
+
+3:     popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
+       popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       popl    %edx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edx)
+       ret
+       cfi_endproc
+       .size   __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+       .globl  __lll_robust_timedlock_wait
+       .type   __lll_robust_timedlock_wait,@function
+       .hidden __lll_robust_timedlock_wait
+       .align  16
+__lll_robust_timedlock_wait:
+       cfi_startproc
+       /* Check for a valid timeout value.  */
+       cmpl    $1000000000, 4(%edx)
+       jae     3f
+
+       pushl   %edi
+       cfi_adjust_cfa_offset(4)
+       pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%edi, -8)
+       cfi_offset(%esi, -12)
+       cfi_offset(%ebx, -16)
+       cfi_offset(%ebp, -20)
+
+       /* Stack frame for the timespec and timeval structs.  */
+       subl    $12, %esp
+       cfi_adjust_cfa_offset(12)
+
+       movl    %ecx, %ebp
+       movl    %edx, %edi
+
+1:     movl    %eax, 8(%esp)
+
+       /* Get current time.  */
+       movl    %esp, %ebx
+       xorl    %ecx, %ecx
+       movl    $__NR_gettimeofday, %eax
+       ENTER_KERNEL
+
+       /* Compute relative timeout.  */
+       movl    4(%esp), %eax
+       movl    $1000, %edx
+       mul     %edx            /* Milli seconds to nano seconds.  */
+       movl    (%edi), %ecx
+       movl    4(%edi), %edx
+       subl    (%esp), %ecx
+       subl    %eax, %edx
+       jns     4f
+       addl    $1000000000, %edx
+       subl    $1, %ecx
+4:     testl   %ecx, %ecx
+       js      8f              /* Time is already up.  */
+
+       /* Store relative timeout.  */
+       movl    %ecx, (%esp)
+       movl    %edx, 4(%esp)
+
+       movl    %ebp, %ebx
+
+       movl    8(%esp), %edx
+       movl    %edx, %eax
+       orl     $FUTEX_WAITERS, %edx
+
+       testl   $FUTEX_OWNER_DIED, %eax
+       jnz     6f
+
+       cmpl    %eax, %edx
+       je      2f
+
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       movl    $0, %ecx        /* Must use mov to avoid changing cc.  */
+       jnz     5f
+
+2:
+       /* Futex call.  */
+       movl    %esp, %esi
+       movl    20(%esp), %ecx
+       LOAD_FUTEX_WAIT (%ecx)
+       movl    $SYS_futex, %eax
+       ENTER_KERNEL
+       movl    %eax, %ecx
+
+       movl    (%ebx), %eax
+
+5:     testl   %eax, %eax
+       jne     7f
+
+       movl    %gs:TID, %edx
+       orl     $FUTEX_WAITERS, %edx
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jnz     7f
+
+6:     addl    $12, %esp
+       cfi_adjust_cfa_offset(-12)
+       popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
+       popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
+       popl    %edi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
+       ret
+
+3:     movl    $EINVAL, %eax
+       ret
+
+       cfi_adjust_cfa_offset(28)
+       cfi_offset(%edi, -8)
+       cfi_offset(%esi, -12)
+       cfi_offset(%ebx, -16)
+       cfi_offset(%ebp, -20)
+       /* Check whether the time expired.  */
+7:     cmpl    $-ETIMEDOUT, %ecx
+       jne     1b
+
+8:     movl    $ETIMEDOUT, %eax
+       jmp     6b
+       cfi_endproc
+       .size   __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
index 2af9e38..040d7f8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelbarrier.h>
 
-#define FUTEX_WAIT     0
-#define FUTEX_WAKE     1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
        .text
 
        .globl  pthread_barrier_wait
        .type   pthread_barrier_wait,@function
        .align  16
 pthread_barrier_wait:
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
 
        movl    8(%esp), %ebx
 
@@ -54,6 +48,8 @@ pthread_barrier_wait:
 
        /* There are more threads to come.  */
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%esi, -12)
 
 #if CURR_EVENT == 0
        movl    (%ebx), %edx
@@ -68,7 +64,13 @@ pthread_barrier_wait:
 
        /* Wait for the remaining threads.  The call will return immediately
           if the CURR_EVENT memory has meanwhile been changed.  */
-7:     xorl    %ecx, %ecx              /* movl $FUTEX_WAIT, %ecx */
+7:
+#if FUTEX_WAIT == 0
+       movl    PRIVATE(%ebx), %ecx
+#else
+       movl    $FUTEX_WAIT, %ecx
+       orl     PRIVATE(%ebx), %ecx
+#endif
        xorl    %esi, %esi
 8:     movl    $SYS_futex, %eax
        ENTER_KERNEL
@@ -81,7 +83,7 @@ pthread_barrier_wait:
 #else
        cmpl    %edx, CURR_EVENT(%ebx)
 #endif
-       je,pn   8b
+       je      8b
 
        /* Increment LEFT.  If this brings the count back to the
           initial count unlock the object.  */
@@ -91,7 +93,7 @@ pthread_barrier_wait:
        xaddl   %edx, LEFT(%ebx)
        subl    $1, %ecx
        cmpl    %ecx, %edx
-       jne,pt  10f
+       jne     10f
 
        /* Release the mutex.  We cannot release the lock before
           waking the waiting threads since otherwise a new thread might
@@ -104,9 +106,16 @@ pthread_barrier_wait:
 10:    movl    %esi, %eax              /* != PTHREAD_BARRIER_SERIAL_THREAD */
 
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
 
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
+       
        /* The necessary number of threads arrived.  */
 3:
 #if CURR_EVENT == 0
@@ -119,6 +128,7 @@ pthread_barrier_wait:
           so 0x7fffffff is the highest value.  */
        movl    $0x7fffffff, %edx
        movl    $FUTEX_WAKE, %ecx
+       orl     PRIVATE(%ebx), %ecx
        movl    $SYS_futex, %eax
        ENTER_KERNEL
 
@@ -130,7 +140,7 @@ pthread_barrier_wait:
        xaddl   %edx, LEFT(%ebx)
        subl    $1, %ecx
        cmpl    %ecx, %edx
-       jne,pt  5f
+       jne     5f
 
        /* Release the mutex.  We cannot release the lock before
           waking the waiting threads since otherwise a new thread might
@@ -142,21 +152,36 @@ pthread_barrier_wait:
 5:     orl     $-1, %eax               /* == PTHREAD_BARRIER_SERIAL_THREAD */
 
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
 
-1:     leal    MUTEX(%ebx), %ecx
-       call    __lll_mutex_lock_wait
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
+1:     movl    PRIVATE(%ebx), %ecx
+       leal    MUTEX(%ebx), %edx
+       xorl    $LLL_SHARED, %ecx
+       call    __lll_lock_wait
        jmp     2b
 
-4:     leal    MUTEX(%ebx), %eax
-       call    __lll_mutex_unlock_wake
+4:     movl    PRIVATE(%ebx), %ecx
+       leal    MUTEX(%ebx), %eax
+       xorl    $LLL_SHARED, %ecx
+       call    __lll_unlock_wake
        jmp     5b
 
-6:     leal    MUTEX(%ebx), %eax
-       call    __lll_mutex_unlock_wake
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%esi, -12)
+6:     movl    PRIVATE(%ebx), %ecx
+       leal    MUTEX(%ebx), %eax
+       xorl    $LLL_SHARED, %ecx
+       call    __lll_unlock_wake
        jmp     7b
 
-9:     leal    MUTEX(%ebx), %eax
-       call    __lll_mutex_unlock_wake
+9:     movl    PRIVATE(%ebx), %ecx
+       leal    MUTEX(%ebx), %eax
+       xorl    $LLL_SHARED, %ecx
+       call    __lll_unlock_wake
        jmp     10b
+       cfi_endproc
        .size   pthread_barrier_wait,.-pthread_barrier_wait
index 6e8ffe6..669b96a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2006,2007,2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-#define FUTEX_REQUEUE          3
-#define FUTEX_CMP_REQUEUE      4
-
-#define EINVAL                 22
-
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
 
        .text
 
        .type   __pthread_cond_broadcast, @function
        .align  16
 __pthread_cond_broadcast:
-
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebx, 0)
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%esi, 0)
        pushl   %edi
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%edi, 0)
        pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebp, 0)
+       cfi_remember_state
 
        movl    20(%esp), %ebx
 
@@ -92,8 +90,24 @@ __pthread_cond_broadcast:
 8:     cmpl    $-1, %edi
        je      9f
 
+       /* Do not use requeue for pshared condvars.  */
+       testl   $PS_BIT, MUTEX_KIND(%edi)
+       jne     9f
+
+       /* Requeue to a non-robust PI mutex if the PI bit is set and
+          the robust bit is not set.  */
+       movl    MUTEX_KIND(%edi), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       je      81f
+
        /* Wake up all threads.  */
-       movl    $FUTEX_CMP_REQUEUE, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx
+#else
+       movl    %gs:PRIVATE_FUTEX, %ecx
+       orl     $FUTEX_CMP_REQUEUE, %ecx
+#endif
        movl    $SYS_futex, %eax
        movl    $0x7fffffff, %esi
        movl    $1, %edx
@@ -111,51 +125,113 @@ __pthread_cond_broadcast:
        cmpl    $0xfffff001, %eax
        jae     9f
 
-10:    xorl    %eax, %eax
+6:     xorl    %eax, %eax
        popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
        popl    %edi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
 
-       .align  16
-       /* Unlock.  */
-4:     LOCK
-       subl    $1, cond_lock-cond_futex(%ebx)
-       jne     5f
+       cfi_restore_state
 
-6:     xorl    %eax, %eax
-       popl    %ebp
-       popl    %edi
-       popl    %esi
-       popl    %ebx
-       ret
+81:    movl    $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+       movl    $SYS_futex, %eax
+       movl    $0x7fffffff, %esi
+       movl    $1, %edx
+       /* Get the address of the futex involved.  */
+# if MUTEX_FUTEX != 0
+       addl    $MUTEX_FUTEX, %edi
+# endif
+       int     $0x80
+
+       /* For any kind of error, which mainly is EAGAIN, we try again
+       with WAKE.  The general test also covers running on old
+       kernels.  */
+       cmpl    $0xfffff001, %eax
+       jb      6b
+       jmp     9f
 
        /* Initial locking failed.  */
 1:
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
 #endif
-       call    __lll_mutex_lock_wait
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_lock_wait
        jmp     2b
 
-       /* Unlock in loop requires waekup.  */
+       .align  16
+       /* Unlock.  */
+4:     LOCK
+       subl    $1, cond_lock-cond_futex(%ebx)
+       je      6b
+
+       /* Unlock in loop requires wakeup.  */
 5:     leal    cond_lock-cond_futex(%ebx), %eax
-       call    __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
        jmp     6b
 
-       /* Unlock in loop requires waekup.  */
+       /* Unlock in loop requires wakeup.  */
 7:     leal    cond_lock-cond_futex(%ebx), %eax
-       call    __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
        jmp     8b
 
 9:     /* The futex requeue functionality is not available.  */
        movl    $0x7fffffff, %edx
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $SYS_futex, %eax
        ENTER_KERNEL
-       jmp     10b
+       jmp     6b
+       cfi_endproc
        .size   __pthread_cond_broadcast, .-__pthread_cond_broadcast
 weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
index ec82179..54e80d0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2007,2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-#define FUTEX_REQUEUE          3
-
-#define EINVAL                 22
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
 
 
        .text
        .align  16
 __pthread_cond_signal:
 
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebx, 0)
        pushl   %edi
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%edi, 0)
+       cfi_remember_state
 
        movl    12(%esp), %edi
 
@@ -77,35 +74,141 @@ __pthread_cond_signal:
        addl    $1, (%ebx)
 
        /* Wake up one thread.  */
-       movl    $FUTEX_WAKE, %ecx
+       pushl   %esi
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%esi, 0)
+       pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebp, 0)
+
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       sete    %cl
+       je      8f
+
+       movl    dep_mutex-cond_futex(%ebx), %edx
+       /* Requeue to a non-robust PI mutex if the PI bit is set and
+          the robust bit is not set.  */
+       movl    MUTEX_KIND(%edx), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       je      9f
+
+8:     subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE_OP, %ecx
        movl    $SYS_futex, %eax
        movl    $1, %edx
+       movl    $1, %esi
+       movl    $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
+       /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+          sysenter.
+       ENTER_KERNEL  */
+       int     $0x80
+       popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
+       popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
+
+       /* For any kind of error, we try again with WAKE.
+          The general test also covers running on old kernels.  */
+       cmpl    $-4095, %eax
+       jae     7f
+
+6:     xorl    %eax, %eax
+       popl    %edi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
+       popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       ret
+
+       cfi_restore_state
+
+9:     movl    $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+       movl    $SYS_futex, %eax
+       movl    $1, %edx
+       xorl    %esi, %esi
+       movl    dep_mutex-cond_futex(%ebx), %edi
+       movl    (%ebx), %ebp
+       /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+          sysenter.
+       ENTER_KERNEL  */
+       int     $0x80
+       popl    %ebp
+       popl    %esi
+
+       leal    -cond_futex(%ebx), %edi
+
+       /* For any kind of error, we try again with WAKE.
+          The general test also covers running on old kernels.  */
+       cmpl    $-4095, %eax
+       jb      4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       orl     $FUTEX_WAKE, %ecx
+
+       xorl    $(FUTEX_WAKE ^ FUTEX_WAKE_OP), %ecx
+       movl    $SYS_futex, %eax
+       /* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
+       movl    $1, %edx  */
        ENTER_KERNEL
 
        /* Unlock.  Note that at this point %edi always points to
           cond_lock.  */
 4:     LOCK
        subl    $1, (%edi)
-       jne     5f
+       je      6b
 
-6:     xorl    %eax, %eax
-       popl    %edi
-       popl    %ebx
-       ret
+       /* Unlock in loop requires wakeup.  */
+5:     movl    %edi, %eax
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
+       jmp     6b
 
        /* Initial locking failed.  */
 1:
 #if cond_lock == 0
-       movl    %edi, %ecx
+       movl    %edi, %edx
 #else
-       leal    cond_lock(%edi), %ecx
+       leal    cond_lock(%edi), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       cmpl    $-1, dep_mutex(%edi)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_lock_wait
        jmp     2b
 
-       /* Unlock in loop requires wakeup.  */
-5:     movl    %edi, %eax
-       call    __lll_mutex_unlock_wake
-       jmp     6b
+       cfi_endproc
        .size   __pthread_cond_signal, .-__pthread_cond_signal
 weak_alias(__pthread_cond_signal, pthread_cond_signal)
index b8f0d2e..c56dd77 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004,2006-2007,2009,2010 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <pthread-errnos.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
 
 
        .text
        .align  16
 __pthread_cond_timedwait:
 .LSTARTCODE:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
        pushl   %ebp
-.Lpush_ebp:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebp, 0)
        pushl   %edi
-.Lpush_edi:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%edi, 0)
        pushl   %esi
-.Lpush_esi:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%esi, 0)
        pushl   %ebx
-.Lpush_ebx:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebx, 0)
 
        movl    20(%esp), %ebx
        movl    28(%esp), %ebp
@@ -84,11 +92,12 @@ __pthread_cond_timedwait:
        addl    $1, total_seq(%ebx)
        adcl    $0, total_seq+4(%ebx)
        addl    $1, cond_futex(%ebx)
-       addl    $(1 << clock_bits), cond_nwaiters(%ebx)
+       addl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
 
-#define FRAME_SIZE 24
+#define FRAME_SIZE 32
        subl    $FRAME_SIZE, %esp
-.Lsubl:
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+       cfi_remember_state
 
        /* Get and store current wakeup_seq value.  */
        movl    wakeup_seq(%ebx), %edi
@@ -98,12 +107,14 @@ __pthread_cond_timedwait:
        movl    %edx, 16(%esp)
        movl    %eax, 20(%esp)
 
+       /* Reset the pi-requeued flag.  */
+8:     movl    $0, 24(%esp)
        /* Get the current time.  */
-8:     movl    %ebx, %edx
+       movl    %ebx, %edx
 #ifdef __NR_clock_gettime
        /* Get the clock number.  */
        movl    cond_nwaiters(%ebx), %ebx
-       andl    $((1 << clock_bits) - 1), %ebx
+       andl    $((1 << nwaiters_shift) - 1), %ebx
        /* Only clocks 0 and 1 are allowed so far.  Both are handled in the
           kernel.  */
        leal    4(%esp), %ecx
@@ -124,7 +135,7 @@ __pthread_cond_timedwait:
        /* Get the current time.  */
        leal    4(%esp), %ebx
        xorl    %ecx, %ecx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
        movl    %edx, %ebx
 
@@ -149,6 +160,7 @@ __pthread_cond_timedwait:
        movl    %edx, 8(%esp)
 
        movl    cond_futex(%ebx), %edi
+       movl    %edi, 28(%esp)
 
        /* Unlock.  */
        LOCK
@@ -163,9 +175,60 @@ __pthread_cond_timedwait:
 4:     call    __pthread_enable_asynccancel
        movl    %eax, (%esp)
 
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       sete    %cl
+       je      40f
+
+       movl    dep_mutex(%ebx), %edi
+       /* Requeue to a non-robust PI mutex if the PI bit is set and
+          the robust bit is not set.  */
+       movl    MUTEX_KIND(%edi), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       jne     40f
+
+       movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+       /* The following only works like this because we only support
+          two clocks, represented using a single bit.  */
+       testl   $1, cond_nwaiters(%ebx)
+       /* XXX Need to implement using sete instead of a jump.  */
+       jne     42f
+       orl     $FUTEX_CLOCK_REALTIME, %ecx
+
+       /* Requeue-PI uses absolute timeout */
+42:    leal    (%ebp), %esi
+       movl    28(%esp), %edx
+       addl    $cond_futex, %ebx
+       movl    $SYS_futex, %eax
+       ENTER_KERNEL
+       subl    $cond_futex, %ebx
+       movl    %eax, %esi
+       /* Set the pi-requeued flag only if the kernel has returned 0. The
+          kernel does not hold the mutex on ETIMEDOUT or any other error.  */
+       cmpl    $0, %eax
+       sete    24(%esp)
+       je      41f
+
+       /* Normal and PI futexes dont mix. Use normal futex functions only
+          if the kernel does not support the PI futex functions.  */
+       cmpl    $-ENOSYS, %eax
+       jne     41f
+       xorl    %ecx, %ecx
+
+40:    subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+       addl    $FUTEX_WAIT, %ecx
+#endif
        leal    4(%esp), %esi
-       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
-       movl    %edi, %edx
+       movl    28(%esp), %edx
        addl    $cond_futex, %ebx
 .Ladd_cond_futex:
        movl    $SYS_futex, %eax
@@ -174,7 +237,7 @@ __pthread_cond_timedwait:
 .Lsub_cond_futex:
        movl    %eax, %esi
 
-       movl    (%esp), %eax
+41:    movl    (%esp), %eax
        call    __pthread_disable_asynccancel
 .LcleanupEND:
 
@@ -225,7 +288,7 @@ __pthread_cond_timedwait:
 14:    addl    $1, woken_seq(%ebx)
        adcl    $0, woken_seq+4(%ebx)
 
-24:    subl    $(1 << clock_bits), cond_nwaiters(%ebx)
+24:    subl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
 
        /* Wake up a thread which wants to destroy the condvar object.  */
        movl    total_seq(%ebx), %eax
@@ -233,12 +296,23 @@ __pthread_cond_timedwait:
        cmpl    $0xffffffff, %eax
        jne     25f
        movl    cond_nwaiters(%ebx), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     25f
 
        addl    $cond_nwaiters, %ebx
        movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_nwaiters(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $1, %edx
        ENTER_KERNEL
        subl    $cond_nwaiters, %ebx
@@ -251,11 +325,15 @@ __pthread_cond_timedwait:
 #endif
        jne     10f
 
-       /* Remove cancellation handler.  */
 11:    movl    24+FRAME_SIZE(%esp), %eax
+       /* With requeue_pi, the mutex lock is held in the kernel.  */
+       movl    24(%esp), %ecx
+       testl   %ecx, %ecx
+       jnz     27f
+
        call    __pthread_mutex_cond_lock
-       addl    $FRAME_SIZE, %esp
-.Laddl:
+26:    addl    $FRAME_SIZE, %esp
+       cfi_adjust_cfa_offset(-FRAME_SIZE);
 
        /* We return the result of the mutex_lock operation if it failed.  */
        testl   %eax, %eax
@@ -268,46 +346,118 @@ __pthread_cond_timedwait:
 #endif
 
 18:    popl    %ebx
-.Lpop_ebx:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %esi
-.Lpop_esi:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        popl    %edi
-.Lpop_edi:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
        popl    %ebp
-.Lpop_ebp:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
 
        ret
 
+       cfi_restore_state
+
+27:    call    __pthread_mutex_cond_lock_adjust
+       xorl    %eax, %eax
+       jmp     26b
+
+       cfi_adjust_cfa_offset(-FRAME_SIZE);
        /* Initial locking failed.  */
 1:
-.LSbl1:
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_lock_wait
        jmp     2b
 
+       /* The initial unlocking of the mutex failed.  */
+16:
+       LOCK
+#if cond_lock == 0
+       subl    $1, (%ebx)
+#else
+       subl    $1, cond_lock(%ebx)
+#endif
+       jne     18b
+
+       movl    %eax, %esi
+#if cond_lock == 0
+       movl    %ebx, %eax
+#else
+       leal    cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
+
+       movl    %esi, %eax
+       jmp     18b
+
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+
        /* Unlock in loop requires wakeup.  */
 3:
-.LSbl2:
 #if cond_lock == 0
        movl    %ebx, %eax
 #else
        leal    cond_lock(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
        jmp     4b
 
        /* Locking in loop failed.  */
 5:
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       call    __lll_lock_wait
        jmp     6b
 
        /* Unlock after loop requires wakeup.  */
@@ -317,37 +467,24 @@ __pthread_cond_timedwait:
 #else
        leal    cond_lock(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
-       jmp     11b
-
-       /* The initial unlocking of the mutex failed.  */
-16:
-.LSbl3:
-       LOCK
-#if cond_lock == 0
-       subl    $1, (%ebx)
-#else
-       subl    $1, cond_lock(%ebx)
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
 #endif
-       jne     18b
-
-       movl    %eax, %esi
-#if cond_lock == 0
-       movl    %ebx, %eax
-#else
-       leal    cond_lock(%ebx), %eax
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
 #endif
-       call    __lll_mutex_unlock_wake
-
-       movl    %esi, %eax
-       jmp     18b
+       call    __lll_unlock_wake
+       jmp     11b
 
 #if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
        /* clock_gettime not available.  */
-.LSbl4:
 19:    leal    4(%esp), %ebx
        xorl    %ecx, %ecx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
        movl    %edx, %ebx
 
@@ -374,7 +511,6 @@ weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
        .type   __condvar_tw_cleanup2, @function
 __condvar_tw_cleanup2:
        subl    $cond_futex, %ebx
-.LSbl5:
        .size   __condvar_tw_cleanup2, .-__condvar_tw_cleanup2
        .type   __condvar_tw_cleanup, @function
 __condvar_tw_cleanup:
@@ -392,25 +528,45 @@ __condvar_tw_cleanup:
        jz      1f
 
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       call    __lll_lock_wait
 
 1:     movl    broadcast_seq(%ebx), %eax
        cmpl    20(%esp), %eax
        jne     3f
 
-       addl    $1, wakeup_seq(%ebx)
+       /* We increment the wakeup_seq counter only if it is lower than
+          total_seq.  If this is not the case the thread was woken and
+          then canceled.  In this case we ignore the signal.  */
+       movl    total_seq(%ebx), %eax
+       movl    total_seq+4(%ebx), %edi
+       cmpl    wakeup_seq+4(%ebx), %edi
+       jb      6f
+       ja      7f
+       cmpl    wakeup_seq(%ebx), %eax
+       jbe     7f
+
+6:     addl    $1, wakeup_seq(%ebx)
        adcl    $0, wakeup_seq+4(%ebx)
-
        addl    $1, cond_futex(%ebx)
 
-       addl    $1, woken_seq(%ebx)
+7:     addl    $1, woken_seq(%ebx)
        adcl    $0, woken_seq+4(%ebx)
 
-3:     subl    $(1 << clock_bits), cond_nwaiters(%ebx)
+3:     subl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
 
        /* Wake up a thread which wants to destroy the condvar object.  */
        xorl    %edi, %edi
@@ -419,12 +575,23 @@ __condvar_tw_cleanup:
        cmpl    $0xffffffff, %eax
        jne     4f
        movl    cond_nwaiters(%ebx), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     4f
 
        addl    $cond_nwaiters, %ebx
        movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_nwaiters(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $1, %edx
        ENTER_KERNEL
        subl    $cond_nwaiters, %ebx
@@ -443,13 +610,34 @@ __condvar_tw_cleanup:
 #else
        leal    cond_lock(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
 
        /* Wake up all waiters to make sure no signal gets lost.  */
 2:     testl   %edi, %edi
        jnz     5f
        addl    $cond_futex, %ebx
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $SYS_futex, %eax
        movl    $0x7fffffff, %edx
        ENTER_KERNEL
@@ -462,4 +650,44 @@ __condvar_tw_cleanup:
        call    _Unwind_Resume
        hlt
 .LENDCODE:
+       cfi_endproc
        .size   __condvar_tw_cleanup, .-__condvar_tw_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   DW_EH_PE_omit                   # @LPStart format (omit)
+       .byte   DW_EH_PE_omit                   # @TType format (omit)
+       .byte   DW_EH_PE_sdata4                 # call-site format
+                                               # DW_EH_PE_sdata4
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .long   .LcleanupSTART-.LSTARTCODE
+       .long   .Ladd_cond_futex-.LcleanupSTART
+       .long   __condvar_tw_cleanup-.LSTARTCODE
+       .uleb128  0
+       .long   .Ladd_cond_futex-.LSTARTCODE
+       .long   .Lsub_cond_futex-.Ladd_cond_futex
+       .long   __condvar_tw_cleanup2-.LSTARTCODE
+       .uleb128  0
+       .long   .Lsub_cond_futex-.LSTARTCODE
+       .long   .LcleanupEND-.Lsub_cond_futex
+       .long   __condvar_tw_cleanup-.LSTARTCODE
+       .uleb128  0
+       .long   .LcallUR-.LSTARTCODE
+       .long   .LENDCODE-.LcallUR
+       .long   0
+       .uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  4
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+       .long   __gcc_personality_v0
+#endif
index 377a734..ab4ef0a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004,2006-2007,2009,2010 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <tcb-offsets.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#include <pthread-errnos.h>
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
 
 
        .text
        .align  16
 __pthread_cond_wait:
 .LSTARTCODE:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
 
+       pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebp, 0)
        pushl   %edi
-.Lpush_edi:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%edi, 0)
        pushl   %esi
-.Lpush_esi:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%esi, 0)
        pushl   %ebx
-.Lpush_ebx:
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset(%ebx, 0)
 
        xorl    %esi, %esi
-       movl    16(%esp), %ebx
+       movl    20(%esp), %ebx
 
        /* Get internal lock.  */
        movl    $1, %edx
@@ -64,7 +74,7 @@ __pthread_cond_wait:
        /* Store the reference to the mutex.  If there is already a
           different value in there this is a bad user bug.  */
 2:     cmpl    $-1, dep_mutex(%ebx)
-       movl    20(%esp), %eax
+       movl    24(%esp), %eax
        je      15f
        movl    %eax, dep_mutex(%ebx)
 
@@ -78,11 +88,12 @@ __pthread_cond_wait:
        addl    $1, total_seq(%ebx)
        adcl    $0, total_seq+4(%ebx)
        addl    $1, cond_futex(%ebx)
-       addl    $(1 << clock_bits), cond_nwaiters(%ebx)
+       addl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
 
-#define FRAME_SIZE 16
+#define FRAME_SIZE 20
        subl    $FRAME_SIZE, %esp
-.Lsubl:
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+       cfi_remember_state
 
        /* Get and store current wakeup_seq value.  */
        movl    wakeup_seq(%ebx), %edi
@@ -92,7 +103,9 @@ __pthread_cond_wait:
        movl    %edx, 8(%esp)
        movl    %eax, 12(%esp)
 
-8:     movl    cond_futex(%ebx), %edi
+       /* Reset the pi-requeued flag.  */
+8:     movl    $0, 16(%esp)
+       movl    cond_futex(%ebx), %ebp
 
        /* Unlock.  */
        LOCK
@@ -107,8 +120,48 @@ __pthread_cond_wait:
 4:     call    __pthread_enable_asynccancel
        movl    %eax, (%esp)
 
-       movl    %esi, %ecx      /* movl $FUTEX_WAIT, %ecx */
-       movl    %edi, %edx
+       xorl    %ecx, %ecx
+       cmpl    $-1, dep_mutex(%ebx)
+       sete    %cl
+       je      18f
+
+       movl    dep_mutex(%ebx), %edi
+       /* Requeue to a non-robust PI mutex if the PI bit is set and
+          the robust bit is not set.  */
+       movl    MUTEX_KIND(%edi), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       jne     18f
+
+       movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+       movl    %ebp, %edx
+       xorl    %esi, %esi
+       addl    $cond_futex, %ebx
+       movl    $SYS_futex, %eax
+       ENTER_KERNEL
+       subl    $cond_futex, %ebx
+       /* Set the pi-requeued flag only if the kernel has returned 0. The
+          kernel does not hold the mutex on error.  */
+       cmpl    $0, %eax
+       sete    16(%esp)
+       je      19f
+
+       /* Normal and PI futexes dont mix. Use normal futex functions only
+          if the kernel does not support the PI futex functions.  */
+       cmpl    $-ENOSYS, %eax
+       jne     19f
+       xorl    %ecx, %ecx
+
+18:    subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+       addl    $FUTEX_WAIT, %ecx
+#endif
+       movl    %ebp, %edx
        addl    $cond_futex, %ebx
 .Ladd_cond_futex:
        movl    $SYS_futex, %eax
@@ -116,7 +169,7 @@ __pthread_cond_wait:
        subl    $cond_futex, %ebx
 .Lsub_cond_futex:
 
-       movl    (%esp), %eax
+19:    movl    (%esp), %eax
        call    __pthread_disable_asynccancel
 .LcleanupEND:
 
@@ -155,7 +208,7 @@ __pthread_cond_wait:
        adcl    $0, woken_seq+4(%ebx)
 
        /* Unlock */
-16:    subl    $(1 << clock_bits), cond_nwaiters(%ebx)
+16:    subl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
 
        /* Wake up a thread which wants to destroy the condvar object.  */
        movl    total_seq(%ebx), %eax
@@ -163,12 +216,23 @@ __pthread_cond_wait:
        cmpl    $0xffffffff, %eax
        jne     17f
        movl    cond_nwaiters(%ebx), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     17f
 
        addl    $cond_nwaiters, %ebx
        movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_nwaiters(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $1, %edx
        ENTER_KERNEL
        subl    $cond_nwaiters, %ebx
@@ -181,51 +245,130 @@ __pthread_cond_wait:
 #endif
        jne     10f
 
-11:    movl    20+FRAME_SIZE(%esp), %eax
+       /* With requeue_pi, the mutex lock is held in the kernel.  */
+11:    movl    24+FRAME_SIZE(%esp), %eax
+       movl    16(%esp), %ecx
+       testl   %ecx, %ecx
+       jnz     21f
+
        call    __pthread_mutex_cond_lock
-       addl    $FRAME_SIZE, %esp
-.Laddl:
+20:    addl    $FRAME_SIZE, %esp
+       cfi_adjust_cfa_offset(-FRAME_SIZE);
 
 14:    popl    %ebx
-.Lpop_ebx:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %esi
-.Lpop_esi:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        popl    %edi
-.Lpop_edi:
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
+       popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
 
        /* We return the result of the mutex_lock operation.  */
        ret
 
+       cfi_restore_state
+
+21:    call    __pthread_mutex_cond_lock_adjust
+       xorl    %eax, %eax
+       jmp     20b
+
+       cfi_adjust_cfa_offset(-FRAME_SIZE);
        /* Initial locking failed.  */
 1:
-.LSbl1:
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_lock_wait
        jmp     2b
 
-       /* Unlock in loop requires waekup.  */
+       /* The initial unlocking of the mutex failed.  */
+12:
+       LOCK
+#if cond_lock == 0
+       subl    $1, (%ebx)
+#else
+       subl    $1, cond_lock(%ebx)
+#endif
+       jne     14b
+
+       movl    %eax, %esi
+#if cond_lock == 0
+       movl    %ebx, %eax
+#else
+       leal    cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
+
+       movl    %esi, %eax
+       jmp     14b
+
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+
+       /* Unlock in loop requires wakeup.  */
 3:
-.LSbl2:
 #if cond_lock == 0
        movl    %ebx, %eax
 #else
        leal    cond_lock(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
        jmp     4b
 
        /* Locking in loop failed.  */
 5:
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       call    __lll_lock_wait
        jmp     6b
 
        /* Unlock after loop requires wakeup.  */
@@ -235,30 +378,18 @@ __pthread_cond_wait:
 #else
        leal    cond_lock(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
-       jmp     11b
-
-       /* The initial unlocking of the mutex failed.  */
-12:
-.LSbl3:
-       LOCK
-#if cond_lock == 0
-       subl    $1, (%ebx)
-#else
-       subl    $1, cond_lock(%ebx)
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
 #endif
-       jne     14b
-
-       movl    %eax, %esi
-#if cond_lock == 0
-       movl    %ebx, %eax
-#else
-       leal    cond_lock(%ebx), %eax
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
 #endif
-       call    __lll_mutex_unlock_wake
-
-       movl    %esi, %eax
-       jmp     14b
+       call    __lll_unlock_wake
+       jmp     11b
        .size   __pthread_cond_wait, .-__pthread_cond_wait
 weak_alias(__pthread_cond_wait, pthread_cond_wait)
 
@@ -284,25 +415,45 @@ __condvar_w_cleanup:
        jz      1f
 
 #if cond_lock == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    cond_lock(%ebx), %ecx
+       leal    cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
 #endif
-       call    __lll_mutex_lock_wait
+       call    __lll_lock_wait
 
 1:     movl    broadcast_seq(%ebx), %eax
        cmpl    12(%esp), %eax
        jne     3f
 
-       addl    $1, wakeup_seq(%ebx)
+       /* We increment the wakeup_seq counter only if it is lower than
+          total_seq.  If this is not the case the thread was woken and
+          then canceled.  In this case we ignore the signal.  */
+       movl    total_seq(%ebx), %eax
+       movl    total_seq+4(%ebx), %edi
+       cmpl    wakeup_seq+4(%ebx), %edi
+       jb      6f
+       ja      7f
+       cmpl    wakeup_seq(%ebx), %eax
+       jbe     7f
+
+6:     addl    $1, wakeup_seq(%ebx)
        adcl    $0, wakeup_seq+4(%ebx)
-
        addl    $1, cond_futex(%ebx)
 
-       addl    $1, woken_seq(%ebx)
+7:     addl    $1, woken_seq(%ebx)
        adcl    $0, woken_seq+4(%ebx)
 
-3:     subl    $(1 << clock_bits), cond_nwaiters(%ebx)
+3:     subl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
 
        /* Wake up a thread which wants to destroy the condvar object.  */
        xorl    %edi, %edi
@@ -311,12 +462,23 @@ __condvar_w_cleanup:
        cmpl    $0xffffffff, %eax
        jne     4f
        movl    cond_nwaiters(%ebx), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     4f
 
        addl    $cond_nwaiters, %ebx
        movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_nwaiters(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $1, %edx
        ENTER_KERNEL
        subl    $cond_nwaiters, %ebx
@@ -335,18 +497,39 @@ __condvar_w_cleanup:
 #else
        leal    cond_lock(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex(%ebx)
+       setne   %cl
+       subl    $1, %ecx
+       andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+       addl    $LLL_PRIVATE, %ecx
+#endif
+       call    __lll_unlock_wake
 
        /* Wake up all waiters to make sure no signal gets lost.  */
 2:     testl   %edi, %edi
        jnz     5f
        addl    $cond_futex, %ebx
-       movl    $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+       xorl    %ecx, %ecx
+#endif
+       cmpl    $-1, dep_mutex-cond_futex(%ebx)
+       sete    %cl
+       subl    $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %ecx
+#else
+       andl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $FUTEX_WAKE, %ecx
        movl    $SYS_futex, %eax
        movl    $0x7fffffff, %edx
        ENTER_KERNEL
 
-5:     movl    20+FRAME_SIZE(%esp), %eax
+5:     movl    24+FRAME_SIZE(%esp), %eax
        call    __pthread_mutex_cond_lock
 
        movl    %esi, (%esp)
@@ -354,4 +537,54 @@ __condvar_w_cleanup:
        call    _Unwind_Resume
        hlt
 .LENDCODE:
+       cfi_endproc
        .size   __condvar_w_cleanup, .-__condvar_w_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   DW_EH_PE_omit                   # @LPStart format (omit)
+       .byte   DW_EH_PE_omit                   # @TType format (omit)
+       .byte   DW_EH_PE_sdata4                 # call-site format
+                                               # DW_EH_PE_sdata4
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .long   .LcleanupSTART-.LSTARTCODE
+       .long   .Ladd_cond_futex-.LcleanupSTART
+       .long   __condvar_w_cleanup-.LSTARTCODE
+       .uleb128  0
+       .long   .Ladd_cond_futex-.LSTARTCODE
+       .long   .Lsub_cond_futex-.Ladd_cond_futex
+       .long   __condvar_w_cleanup2-.LSTARTCODE
+       .uleb128  0
+       .long   .Lsub_cond_futex-.LSTARTCODE
+       .long   .LcleanupEND-.Lsub_cond_futex
+       .long   __condvar_w_cleanup-.LSTARTCODE
+       .uleb128  0
+       .long   .LcallUR-.LSTARTCODE
+       .long   .LENDCODE-.LcallUR
+       .long   0
+       .uleb128  0
+.Lcstend:
+
+#ifdef PIC
+       .section .gnu.linkonce.t.__i686.get_pc_thunk.cx,"ax",@progbits
+       .globl  __i686.get_pc_thunk.cx
+       .hidden __i686.get_pc_thunk.cx
+       .type   __i686.get_pc_thunk.cx,@function
+__i686.get_pc_thunk.cx:
+       movl (%esp), %ecx;
+       ret
+       .size   __i686.get_pc_thunk.cx,.-__i686.get_pc_thunk.cx
+#endif
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align 4
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+       .long   __gcc_personality_v0
+#endif
index aec79f0..d181393 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
        .type   __pthread_rwlock_rdlock,@function
        .align  16
 __pthread_rwlock_rdlock:
+       cfi_startproc
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%esi, -8)
+       cfi_offset(%ebx, -12)
 
        xorl    %esi, %esi
        movl    12(%esp), %ebx
@@ -61,7 +57,7 @@ __pthread_rwlock_rdlock:
        jne     14f
        cmpl    $0, WRITERS_QUEUED(%ebx)
        je      5f
-       cmpl    $0, FLAGS(%ebx)
+       cmpb    $0, FLAGS(%ebx)
        je      5f
 
 3:     addl    $1, READERS_QUEUED(%ebx)
@@ -77,8 +73,18 @@ __pthread_rwlock_rdlock:
 #endif
        jne     10f
 
-11:    addl    $READERS_WAKEUP, %ebx
-       movl    %esi, %ecx      /* movl $FUTEX_WAIT, %ecx */
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movzbl  PSHARED(%ebx), %ecx
+       xorl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+       movzbl  PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+       orl     $FUTEX_WAIT, %ecx
+# endif
+       xorl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $READERS_WAKEUP, %ebx
        movl    $SYS_futex, %eax
        ENTER_KERNEL
 
@@ -98,7 +104,7 @@ __pthread_rwlock_rdlock:
 13:    subl    $1, READERS_QUEUED(%ebx)
        jmp     2b
 
-5:     xorl    %ecx, %ecx
+5:     xorl    %edx, %edx
        addl    $1, NR_READERS(%ebx)
        je      8f
 9:     LOCK
@@ -110,24 +116,32 @@ __pthread_rwlock_rdlock:
        jne     6f
 7:
 
-       movl    %ecx, %eax
+       movl    %edx, %eax
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        ret
 
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%esi, -8)
+       cfi_offset(%ebx, -12)
 1:
 #if MUTEX == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    MUTEX(%ebx), %ecx
+       leal    MUTEX(%ebx), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_lock_wait
        jmp     2b
 
 14:    cmpl    %gs:TID, %eax
        jne     3b
        /* Deadlock detected.  */
-       movl    $EDEADLK, %ecx
+       movl    $EDEADLK, %edx
        jmp     9b
 
 6:
@@ -136,17 +150,18 @@ __pthread_rwlock_rdlock:
 #else
        leal    MUTEX(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_unlock_wake
        jmp     7b
 
        /* Overflow.  */
 8:     subl    $1, NR_READERS(%ebx)
-       movl    $EAGAIN, %ecx
+       movl    $EAGAIN, %edx
        jmp     9b
 
        /* Overflow.  */
 4:     subl    $1, READERS_QUEUED(%ebx)
-       movl    $EAGAIN, %ecx
+       movl    $EAGAIN, %edx
        jmp     9b
 
 10:
@@ -155,17 +170,20 @@ __pthread_rwlock_rdlock:
 #else
        leal    MUTEX(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_unlock_wake
        jmp     11b
 
 12:
 #if MUTEX == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    MUTEX(%ebx), %ecx
+       leal    MUTEX(%ebx), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_lock_wait
        jmp     13b
+       cfi_endproc
        .size   __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
 
        .globl  pthread_rwlock_rdlock
index 3717d7e..1ffdf33 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
        .type   pthread_rwlock_timedrdlock,@function
        .align  16
 pthread_rwlock_timedrdlock:
+       cfi_startproc
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
        pushl   %edi
+       cfi_adjust_cfa_offset(4)
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
        pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%esi, -8)
+       cfi_offset(%edi, -12)
+       cfi_offset(%ebx, -16)
+       cfi_offset(%ebp, -20)
        subl    $8, %esp
+       cfi_adjust_cfa_offset(8)
 
        movl    28(%esp), %ebp
        movl    32(%esp), %edi
@@ -64,7 +65,7 @@ pthread_rwlock_timedrdlock:
        jne     14f
        cmpl    $0, WRITERS_QUEUED(%ebp)
        je      5f
-       cmpl    $0, FLAGS(%ebp)
+       cmpb    $0, FLAGS(%ebp)
        je      5f
 
        /* Check the value of the timeout parameter.  */
@@ -87,7 +88,7 @@ pthread_rwlock_timedrdlock:
        /* Get current time.  */
 11:    movl    %esp, %ebx
        xorl    %ecx, %ecx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
 
        /* Compute relative timeout.  */
@@ -107,13 +108,23 @@ pthread_rwlock_timedrdlock:
        /* Futex call.  */
        movl    %ecx, (%esp)    /* Store relative timeout.  */
        movl    %edx, 4(%esp)
+
        movl    %esi, %edx
-       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movzbl  PSHARED(%ebp), %ecx
+       xorl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+       movzbl  PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+       orl     $FUTEX_WAIT, %ecx
+# endif
+       xorl    %gs:PRIVATE_FUTEX, %ecx
+#endif
        movl    %esp, %esi
        leal    READERS_WAKEUP(%ebp), %ebx
        movl    $SYS_futex, %eax
        ENTER_KERNEL
-       movl    %eax, %ecx
+       movl    %eax, %esi
 17:
 
        /* Reget the lock.  */
@@ -128,14 +139,14 @@ pthread_rwlock_timedrdlock:
        jnz     12f
 
 13:    subl    $1, READERS_QUEUED(%ebp)
-       cmpl    $-ETIMEDOUT, %ecx
+       cmpl    $-ETIMEDOUT, %esi
        jne     2b
 
-18:    movl    $ETIMEDOUT, %ecx
+18:    movl    $ETIMEDOUT, %edx
        jmp     9f
 
 
-5:     xorl    %ecx, %ecx
+5:     xorl    %edx, %edx
        addl    $1, NR_READERS(%ebp)
        je      8f
 9:     LOCK
@@ -146,27 +157,42 @@ pthread_rwlock_timedrdlock:
 #endif
        jne     6f
 
-7:     movl    %ecx, %eax
+7:     movl    %edx, %eax
 
        addl    $8, %esp
+       cfi_adjust_cfa_offset(-8)
        popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %edi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        ret
 
+       cfi_adjust_cfa_offset(24)
+       cfi_offset(%esi, -8)
+       cfi_offset(%edi, -12)
+       cfi_offset(%ebx, -16)
+       cfi_offset(%ebp, -20)
 1:
 #if MUTEX == 0
-       movl    %ebp, %ecx
+       movl    %ebp, %edx
 #else
-       leal    MUTEX(%ebp), %ecx
+       leal    MUTEX(%ebp), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_lock_wait
        jmp     2b
 
 14:    cmpl    %gs:TID, %eax
        jne     3b
-       movl    $EDEADLK, %ecx
+       movl    $EDEADLK, %edx
        jmp     9b
 
 6:
@@ -175,17 +201,18 @@ pthread_rwlock_timedrdlock:
 #else
        leal    MUTEX(%ebp), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_unlock_wake
        jmp     7b
 
        /* Overflow.  */
 8:     subl    $1, NR_READERS(%ebp)
-       movl    $EAGAIN, %ecx
+       movl    $EAGAIN, %edx
        jmp     9b
 
        /* Overflow.  */
 4:     subl    $1, READERS_QUEUED(%ebp)
-       movl    $EAGAIN, %ecx
+       movl    $EAGAIN, %edx
        jmp     9b
 
 10:
@@ -194,21 +221,24 @@ pthread_rwlock_timedrdlock:
 #else
        leal    MUTEX(%ebp), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_unlock_wake
        jmp     11b
 
 12:
 #if MUTEX == 0
-       movl    %ebp, %ecx
+       movl    %ebp, %edx
 #else
-       leal    MUTEX(%ebp), %ecx
+       leal    MUTEX(%ebp), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_lock_wait
        jmp     13b
 
-16:    movl    $-ETIMEDOUT, %ecx
+16:    movl    $-ETIMEDOUT, %esi
        jmp     17b
 
-19:    movl    $EINVAL, %ecx
+19:    movl    $EINVAL, %edx
        jmp     9b
+       cfi_endproc
        .size   pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
index 09c9e30..5826f02 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
        .type   pthread_rwlock_timedwrlock,@function
        .align  16
 pthread_rwlock_timedwrlock:
+       cfi_startproc
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
        pushl   %edi
+       cfi_adjust_cfa_offset(4)
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
        pushl   %ebp
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%esi, -8)
+       cfi_offset(%edi, -12)
+       cfi_offset(%ebx, -16)
+       cfi_offset(%ebp, -20)
        subl    $8, %esp
+       cfi_adjust_cfa_offset(8)
 
        movl    28(%esp), %ebp
        movl    32(%esp), %edi
@@ -85,7 +86,7 @@ pthread_rwlock_timedwrlock:
        /* Get current time.  */
 11:    movl    %esp, %ebx
        xorl    %ecx, %ecx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
 
        /* Compute relative timeout.  */
@@ -105,13 +106,23 @@ pthread_rwlock_timedwrlock:
        /* Futex call.  */
        movl    %ecx, (%esp)    /* Store relative timeout.  */
        movl    %edx, 4(%esp)
+
        movl    %esi, %edx
-       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movzbl  PSHARED(%ebp), %ecx
+       xorl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+       movzbl  PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+       orl     $FUTEX_WAIT, %ecx
+# endif
+       xorl    %gs:PRIVATE_FUTEX, %ecx
+#endif
        movl    %esp, %esi
        leal    WRITERS_WAKEUP(%ebp), %ebx
        movl    $SYS_futex, %eax
        ENTER_KERNEL
-       movl    %eax, %ecx
+       movl    %eax, %esi
 17:
 
        /* Reget the lock.  */
@@ -126,14 +137,14 @@ pthread_rwlock_timedwrlock:
        jnz     12f
 
 13:    subl    $1, WRITERS_QUEUED(%ebp)
-       cmpl    $-ETIMEDOUT, %ecx
+       cmpl    $-ETIMEDOUT, %esi
        jne     2b
 
-18:    movl    $ETIMEDOUT, %ecx
+18:    movl    $ETIMEDOUT, %edx
        jmp     9f
 
 
-5:     xorl    %ecx, %ecx
+5:     xorl    %edx, %edx
        movl    %gs:TID, %eax
        movl    %eax, WRITER(%ebp)
 9:     LOCK
@@ -144,27 +155,42 @@ pthread_rwlock_timedwrlock:
 #endif
        jne     6f
 
-7:     movl    %ecx, %eax
+7:     movl    %edx, %eax
 
        addl    $8, %esp
+       cfi_adjust_cfa_offset(-8)
        popl    %ebp
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebp)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %edi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        ret
 
+       cfi_adjust_cfa_offset(24)
+       cfi_offset(%esi, -8)
+       cfi_offset(%edi, -12)
+       cfi_offset(%ebx, -16)
+       cfi_offset(%ebp, -20)
 1:
 #if MUTEX == 0
-       movl    %ebp, %ecx
+       movl    %ebp, %edx
 #else
-       leal    MUTEX(%ebp), %ecx
+       leal    MUTEX(%ebp), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_lock_wait
        jmp     2b
 
 14:    cmpl    %gs:TID, %eax
        jne     3b
-20:    movl    $EDEADLK, %ecx
+20:    movl    $EDEADLK, %edx
        jmp     9b
 
 6:
@@ -173,12 +199,13 @@ pthread_rwlock_timedwrlock:
 #else
        leal    MUTEX(%ebp), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_unlock_wake
        jmp     7b
 
        /* Overflow.  */
 4:     subl    $1, WRITERS_QUEUED(%ebp)
-       movl    $EAGAIN, %ecx
+       movl    $EAGAIN, %edx
        jmp     9b
 
 10:
@@ -187,21 +214,24 @@ pthread_rwlock_timedwrlock:
 #else
        leal    MUTEX(%ebp), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_unlock_wake
        jmp     11b
 
 12:
 #if MUTEX == 0
-       movl    %ebp, %ecx
+       movl    %ebp, %edx
 #else
-       leal    MUTEX(%ebp), %ecx
+       leal    MUTEX(%ebp), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebp), %ecx
+       call    __lll_lock_wait
        jmp     13b
 
-16:    movl    $-ETIMEDOUT, %ecx
+16:    movl    $-ETIMEDOUT, %esi
        jmp     17b
 
-19:    movl    $EINVAL, %ecx
+19:    movl    $EINVAL, %edx
        jmp     9b
+       cfi_endproc
        .size   pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
index 597c82f..0130261 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
        .type   __pthread_rwlock_unlock,@function
        .align  16
 __pthread_rwlock_unlock:
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
        pushl   %edi
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
+       cfi_offset(%edi, -12)
 
        movl    12(%esp), %edi
 
@@ -60,9 +57,8 @@ __pthread_rwlock_unlock:
 
 5:     movl    $0, WRITER(%edi)
 
-       movl    $1, %ecx
+       movl    $1, %edx
        leal    WRITERS_WAKEUP(%edi), %ebx
-       movl    %ecx, %edx
        cmpl    $0, WRITERS_QUEUED(%edi)
        jne     0f
 
@@ -82,14 +78,30 @@ __pthread_rwlock_unlock:
 #endif
        jne     7f
 
-8:     movl    $SYS_futex, %eax
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movzbl  PSHARED(%edi), %ecx
+       xorl    $FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %ecx
+#else
+       movzbl  PSHARED(%edi), %ecx
+       orl     $FUTEX_WAKE, %ecx
+       xorl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       movl    $SYS_futex, %eax
        ENTER_KERNEL
 
        xorl    %eax, %eax
        popl    %edi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%edi)
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
 
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%ebx, -8)
+       cfi_offset(%edi, -12)
        .align  16
 6:     LOCK
 #if MUTEX == 0
@@ -106,31 +118,34 @@ __pthread_rwlock_unlock:
 
 1:
 #if MUTEX == 0
-       movl    %edi, %ecx
+       movl    %edi, %edx
 #else
-       leal    MUTEX(%edx), %ecx
+       leal    MUTEX(%edi), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%edi), %ecx
+       call    __lll_lock_wait
        jmp     2b
 
 3:
 #if MUTEX == 0
        movl    %edi, %eax
 #else
-       leal    MUTEX(%edx), %eax
+       leal    MUTEX(%edi), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%edi), %ecx
+       call    __lll_unlock_wake
        jmp     4b
 
 7:
 #if MUTEX == 0
        movl    %edi, %eax
 #else
-       leal    MUTEX(%edx), %eax
+       leal    MUTEX(%edi), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%edi), %ecx
+       call    __lll_unlock_wake
        jmp     8b
-
+       cfi_endproc
        .size   __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
 
        .globl  pthread_rwlock_unlock
index bb384a2..f69c49b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
        .type   __pthread_rwlock_wrlock,@function
        .align  16
 __pthread_rwlock_wrlock:
+       cfi_startproc
        pushl   %esi
+       cfi_adjust_cfa_offset(4)
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%esi, -8)
+       cfi_offset(%ebx, -12)
 
        xorl    %esi, %esi
        movl    12(%esp), %ebx
@@ -74,8 +71,18 @@ __pthread_rwlock_wrlock:
 #endif
        jne     10f
 
-11:    addl    $WRITERS_WAKEUP, %ebx
-       movl    %esi, %ecx      /* movl $FUTEX_WAIT, %ecx */
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movzbl  PSHARED(%ebx), %ecx
+       xorl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+       movzbl  PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+       orl     $FUTEX_WAIT, %ecx
+# endif
+       xorl    %gs:PRIVATE_FUTEX, %ecx
+#endif
+       addl    $WRITERS_WAKEUP, %ebx
        movl    $SYS_futex, %eax
        ENTER_KERNEL
 
@@ -95,7 +102,7 @@ __pthread_rwlock_wrlock:
 13:    subl    $1, WRITERS_QUEUED(%ebx)
        jmp     2b
 
-5:     xorl    %ecx, %ecx
+5:     xorl    %edx, %edx
        movl    %gs:TID, %eax
        movl    %eax, WRITER(%ebx)
 9:     LOCK
@@ -107,23 +114,31 @@ __pthread_rwlock_wrlock:
        jne     6f
 7:
 
-       movl    %ecx, %eax
+       movl    %edx, %eax
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        popl    %esi
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%esi)
        ret
 
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%esi, -8)
+       cfi_offset(%ebx, -12)
 1:
 #if MUTEX == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    MUTEX(%ebx), %ecx
+       leal    MUTEX(%ebx), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_lock_wait
        jmp     2b
 
 14:    cmpl    %gs:TID , %eax
        jne     3b
-       movl    $EDEADLK, %ecx
+       movl    $EDEADLK, %edx
        jmp     9b
 
 6:
@@ -132,11 +147,12 @@ __pthread_rwlock_wrlock:
 #else
        leal    MUTEX(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_unlock_wake
        jmp     7b
 
 4:     subl    $1, WRITERS_QUEUED(%ebx)
-       movl    $EAGAIN, %ecx
+       movl    $EAGAIN, %edx
        jmp     9b
 
 10:
@@ -145,17 +161,20 @@ __pthread_rwlock_wrlock:
 #else
        leal    MUTEX(%ebx), %eax
 #endif
-       call    __lll_mutex_unlock_wake
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_unlock_wake
        jmp     11b
 
 12:
 #if MUTEX == 0
-       movl    %ebx, %ecx
+       movl    %ebx, %edx
 #else
-       leal    MUTEX(%ebx), %ecx
+       leal    MUTEX(%ebx), %edx
 #endif
-       call    __lll_mutex_lock_wait
+       movzbl  PSHARED(%ebx), %ecx
+       call    __lll_lock_wait
        jmp     13b
+       cfi_endproc
        .size   __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
 
        .globl  pthread_rwlock_wrlock
index a0dc39c..b077a20 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE             1
+#include <structsem.h>
+#include <lowlevellock.h>
 
 
        .text
        .type   __new_sem_post,@function
        .align  16
 __new_sem_post:
+       cfi_startproc
        pushl   %ebx
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
 
        movl    8(%esp), %ebx
-       movl    $1, %edx
+
+#if VALUE == 0
+       movl    (%ebx), %eax
+#else
+       movl    VALUE(%ebx), %eax
+#endif
+0:     cmpl    $SEM_VALUE_MAX, %eax
+       je      3f
+       leal    1(%eax), %edx
        LOCK
-       xaddl   %edx, (%ebx)
+#if VALUE == 0
+       cmpxchgl %edx, (%ebx)
+#else
+       cmpxchgl %edx, VALUE(%ebx)
+#endif
+       jnz     0b
+
+       cmpl    $0, NWAITERS(%ebx)
+       je      2f
 
-       movl    $SYS_futex, %eax
        movl    $FUTEX_WAKE, %ecx
-       addl    $1, %edx
+       orl     PRIVATE(%ebx), %ecx
+       movl    $1, %edx
+       movl    $SYS_futex, %eax
        ENTER_KERNEL
 
        testl   %eax, %eax
        js      1f
 
-       xorl    %eax, %eax
+2:     xorl    %eax, %eax
        popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
        ret
 
+       cfi_adjust_cfa_offset(4)
+       cfi_offset(%ebx, -8)
 1:
 #ifdef __PIC__
        call    __x86.get_pc_thunk.bx
@@ -80,6 +97,35 @@ __new_sem_post:
        orl     $-1, %eax
        popl    %ebx
        ret
+
+3:
+#ifdef __PIC__
+       call    __x86.get_pc_thunk.bx
+#else
+       movl    $5f, %ebx
+5:
+#endif
+       addl    $_GLOBAL_OFFSET_TABLE_, %ebx
+#if USE___THREAD
+# ifdef NO_TLS_DIRECT_SEG_REFS
+       movl    errno@gotntpoff(%ebx), %edx
+       addl    %gs:0, %edx
+       movl    $EOVERFLOW, (%edx)
+# else
+       movl    errno@gotntpoff(%ebx), %edx
+       movl    $EOVERFLOW, %gs:(%edx)
+# endif
+#else
+       call    __errno_location@plt
+       movl    $EOVERFLOW, (%eax)
+#endif
+
+       orl     $-1, %eax
+       popl    %ebx
+       cfi_adjust_cfa_offset(-4)
+       cfi_restore(%ebx)
+       ret
+       cfi_endproc
        .size   __new_sem_post,.-__new_sem_post
 weak_alias(__new_sem_post, sem_post)
 
index 972b49f..218b12f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
 
-#define FUTEX_WAKE             1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
 
 
        .text
        .globl  sem_timedwait
        .type   sem_timedwait,@function
        .align  16
-       cfi_startproc
 sem_timedwait:
-       /* First check for cancellation.  */
-       movl    %gs:CANCELHANDLING, %eax
-       andl    $0xfffffff9, %eax
-       cmpl    $8, %eax
-       je      10f
-
+.LSTARTCODE:
        movl    4(%esp), %ecx
 
        movl    (%ecx), %eax
 2:     testl   %eax, %eax
-       je,pn   1f
+       je      1f
 
        leal    -1(%eax), %edx
        LOCK
        cmpxchgl %edx, (%ecx)
-       jne,pn  2b
+       jne     2b
 
        xorl    %eax, %eax
        ret
 
        /* Check whether the timeout value is valid.  */
 1:     pushl   %esi
-       cfi_adjust_cfa_offset(4)
+.Lpush_esi:
        pushl   %edi
-       cfi_adjust_cfa_offset(4)
+.Lpush_edi:
        pushl   %ebx
-       cfi_adjust_cfa_offset(4)
+.Lpush_ebx:
        subl    $12, %esp
-       cfi_adjust_cfa_offset(12)
+.Lsub_esp:
 
        movl    32(%esp), %edi
-       cfi_offset(7, -12)              /* %edi */
 
        /* Check for invalid nanosecond field.  */
        cmpl    $1000000000, 4(%edi)
        movl    $EINVAL, %esi
-       cfi_offset(6, -8)               /* %esi */
        jae     6f
 
-       cfi_offset(3, -16)              /* %ebx */
-7:     call    __pthread_enable_asynccancel
-       movl    %eax, 8(%esp)
+       LOCK
+       incl    NWAITERS(%ecx)
 
-       xorl    %ecx, %ecx
+7:     xorl    %ecx, %ecx
        movl    %esp, %ebx
        movl    %ecx, %edx
-       movl    $SYS_gettimeofday, %eax
+       movl    $__NR_gettimeofday, %eax
        ENTER_KERNEL
 
        /* Compute relative timeout.  */
@@ -103,19 +92,30 @@ sem_timedwait:
 
        movl    %ecx, (%esp)    /* Store relative timeout.  */
        movl    %edx, 4(%esp)
-       movl    28(%esp), %ebx
-       xorl    %ecx, %ecx
+
+.LcleanupSTART:
+       call    __pthread_enable_asynccancel
+       movl    %eax, 8(%esp)
+
+       movl    28(%esp), %ebx  /* Load semaphore address.  */
+#if FUTEX_WAIT == 0
+       movl    PRIVATE(%ebx), %ecx
+#else
+       movl    $FUTEX_WAIT, %ecx
+       orl     PRIVATE(%ebx), %ecx
+#endif
        movl    %esp, %esi
-       movl    $SYS_futex, %eax
        xorl    %edx, %edx
+       movl    $SYS_futex, %eax
        ENTER_KERNEL
        movl    %eax, %esi
 
        movl    8(%esp), %eax
        call    __pthread_disable_asynccancel
+.LcleanupEND:
 
        testl   %esi, %esi
-       je,pt   9f
+       je      9f
        cmpl    $-EWOULDBLOCK, %esi
        jne     3f
 
@@ -126,29 +126,27 @@ sem_timedwait:
        leal    -1(%eax), %ecx
        LOCK
        cmpxchgl %ecx, (%ebx)
-       jne,pn  8b
+       jne     8b
 
-       addl    $12, %esp
-       cfi_adjust_cfa_offset(-12)
        xorl    %eax, %eax
+
+       LOCK
+       decl    NWAITERS(%ebx)
+
+10:    addl    $12, %esp
+.Ladd_esp:
        popl    %ebx
-       cfi_adjust_cfa_offset(-4)
-       cfi_restore(3)
+.Lpop_ebx:
        popl    %edi
-       cfi_adjust_cfa_offset(-4)
-       cfi_restore(7)
+.Lpop_edi:
        popl    %esi
-       cfi_adjust_cfa_offset(-4)
-       cfi_restore(6)
+.Lpop_esi:
        ret
 
-       cfi_adjust_cfa_offset(24)
-       cfi_offset(6, -8)               /* %esi */
-       cfi_offset(7, -12)              /* %edi */
-       cfi_offset(3, -16)              /* %ebx */
+.Lafter_ret:
 3:     negl    %esi
 6:
-#ifdef __PIC__
+#ifdef PIC
        call    __x86.get_pc_thunk.bx
 #else
        movl    $4f, %ebx
@@ -169,25 +167,163 @@ sem_timedwait:
        movl    %esi, (%eax)
 #endif
 
-       addl    $12, %esp
-       cfi_adjust_cfa_offset(-12)
+       movl    28(%esp), %ebx  /* Load semaphore address.  */
        orl     $-1, %eax
-       popl    %ebx
-       cfi_adjust_cfa_offset(-4)
-       cfi_restore(3)
-       popl    %edi
-       cfi_adjust_cfa_offset(-4)
-       cfi_restore(7)
-       popl    %esi
-       cfi_adjust_cfa_offset(-4)
-       cfi_restore(6)
-       ret
+       jmp     10b
+       .size   sem_timedwait,.-sem_timedwait
+
 
-10:    /* Canceled.  */
-       movl    $0xffffffff, %gs:RESULT
+       .type   sem_wait_cleanup,@function
+sem_wait_cleanup:
        LOCK
-       orl     $0x10, %gs:CANCELHANDLING
-       movl    %gs:CLEANUP_JMP_BUF, %eax
-       jmp     HIDDEN_JUMPTARGET (__pthread_unwind)
-       cfi_endproc
-       .size   sem_timedwait,.-sem_timedwait
+       decl    NWAITERS(%ebx)
+       movl    %eax, (%esp)
+.LcallUR:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE:
+       .size   sem_wait_cleanup,.-sem_wait_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   0xff                            # @LPStart format (omit)
+       .byte   0xff                            # @TType format (omit)
+       .byte   0x01                            # call-site format
+                                               # DW_EH_PE_uleb128
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 sem_wait_cleanup-.LSTARTCODE
+       .uleb128  0
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+       .long   .LENDCIE-.LSTARTCIE             # Length of the CIE.
+.LSTARTCIE:
+       .long   0                               # CIE ID.
+       .byte   1                               # Version number.
+#ifdef SHARED
+       .string "zPLR"                          # NUL-terminated augmentation
+                                               # string.
+#else
+       .string "zPL"                           # NUL-terminated augmentation
+                                               # string.
+#endif
+       .uleb128 1                              # Code alignment factor.
+       .sleb128 -4                             # Data alignment factor.
+       .byte   8                               # Return address register
+                                               # column.
+#ifdef SHARED
+       .uleb128 7                              # Augmentation value length.
+       .byte   0x9b                            # Personality: DW_EH_PE_pcrel
+                                               # + DW_EH_PE_sdata4
+                                               # + DW_EH_PE_indirect
+       .long   DW.ref.__gcc_personality_v0-.
+       .byte   0x1b                            # LSDA Encoding: DW_EH_PE_pcrel
+                                               # + DW_EH_PE_sdata4.
+       .byte   0x1b                            # FDE Encoding: DW_EH_PE_pcrel
+                                               # + DW_EH_PE_sdata4.
+#else
+       .uleb128 6                              # Augmentation value length.
+       .byte   0x0                             # Personality: absolute
+       .long   __gcc_personality_v0
+       .byte   0x0                             # LSDA Encoding: absolute
+#endif
+       .byte 0x0c                              # DW_CFA_def_cfa
+       .uleb128 4
+       .uleb128 4
+       .byte   0x88                            # DW_CFA_offset, column 0x10
+       .uleb128 1
+       .align 4
+.LENDCIE:
+
+       .long   .LENDFDE-.LSTARTFDE             # Length of the FDE.
+.LSTARTFDE:
+       .long   .LSTARTFDE-.LSTARTFRAME         # CIE pointer.
+#ifdef SHARED
+       .long   .LSTARTCODE-.                   # PC-relative start address
+                                               # of the code.
+#else
+       .long   .LSTARTCODE                     # Start address of the code.
+#endif
+       .long   .LENDCODE-.LSTARTCODE           # Length of the code.
+       .uleb128 4                              # Augmentation size
+#ifdef SHARED
+       .long   .LexceptSTART-.
+#else
+       .long   .LexceptSTART
+#endif
+
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpush_esi-.LSTARTCODE
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 8
+       .byte   0x86                            # DW_CFA_offset %esi
+       .uleb128 2
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpush_edi-.Lpush_esi
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 12
+       .byte   0x87                            # DW_CFA_offset %edi
+       .uleb128 3
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpush_ebx-.Lpush_edi
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 16
+       .byte   0x83                            # DW_CFA_offset %ebx
+       .uleb128 4
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lsub_esp-.Lpush_ebx
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 28
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Ladd_esp-.Lsub_esp
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 16
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpop_ebx-.Ladd_esp
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 12
+       .byte   0xc3                            # DW_CFA_restore %ebx
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpop_edi-.Lpop_ebx
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 8
+       .byte   0xc7                            # DW_CFA_restore %edi
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpop_esi-.Lpop_edi
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 4
+       .byte   0xc6                            # DW_CFA_restore %esi
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lafter_ret-.Lpop_esi
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 28
+       .byte   0x86                            # DW_CFA_offset %esi
+       .uleb128 2
+       .byte   0x87                            # DW_CFA_offset %edi
+       .uleb128 3
+       .byte   0x83                            # DW_CFA_offset %ebx
+       .uleb128 4
+       .align  4
+.LENDFDE:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  4
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+       .long   __gcc_personality_v0
+#endif
index 7db6482..dad9685 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <lowlevellock.h>
 
        .text
 
@@ -42,7 +36,7 @@ __new_sem_trywait:
        leal    -1(%eax), %edx
        LOCK
        cmpxchgl %edx, (%ecx)
-       jne,pn  2b
+       jne     2b
        xorl    %eax, %eax
        ret
 
index c3e6cbc..b1c32ee 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
+#include <structsem.h>
+#include <lowlevellock.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE             1
 
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
 
        .text
 
        .globl  __new_sem_wait
        .type   __new_sem_wait,@function
        .align  16
-       cfi_startproc
 __new_sem_wait:
-       /* First check for cancellation.  */
-       movl    %gs:CANCELHANDLING, %eax
-       andl    $0xfffffff9, %eax
-       cmpl    $8, %eax
-       je      5f
-
+.LSTARTCODE:
        pushl   %ebx
-       cfi_adjust_cfa_offset(4)
+.Lpush_ebx:
        pushl   %esi
-       cfi_adjust_cfa_offset(4)
+.Lpush_esi:
        subl    $4, %esp
-       cfi_adjust_cfa_offset(4)
+.Lsub_esp:
 
        movl    16(%esp), %ebx
-       cfi_offset(3, -8)               /* %ebx */
 
-       cfi_offset(6, -12)              /* %esi */
-3:     movl    (%ebx), %eax
+       movl    (%ebx), %eax
 2:     testl   %eax, %eax
-       je,pn   1f
+       je      1f
 
        leal    -1(%eax), %edx
        LOCK
        cmpxchgl %edx, (%ebx)
-       jne,pn  2b
-       xorl    %eax, %eax
+       jne     2b
+7:     xorl    %eax, %eax
 
-       movl    4(%esp), %esi
-       cfi_restore(6)
+9:     movl    4(%esp), %esi
        movl    8(%esp), %ebx
-       cfi_restore(3)
        addl    $12, %esp
-       cfi_adjust_cfa_offset(-12)
+.Ladd_esp:
        ret
 
-       cfi_adjust_cfa_offset(8)
-       cfi_offset(3, -8)               /* %ebx */
-       cfi_offset(6, -12)              /* %esi */
-1:     call    __pthread_enable_asynccancel
+.Lafter_ret:
+1:     LOCK
+       incl    NWAITERS(%ebx)
+
+.LcleanupSTART:
+6:     call    __pthread_enable_asynccancel
        movl    %eax, (%esp)
 
+#if FUTEX_WAIT == 0
+       movl    PRIVATE(%ebx), %ecx
+#else
+       movl    $FUTEX_WAIT, %ecx
+       orl     PRIVATE(%ebx), %ecx
+#endif
        xorl    %esi, %esi
+       xorl    %edx, %edx
        movl    $SYS_futex, %eax
-       movl    %esi, %ecx
-       movl    %esi, %edx
        ENTER_KERNEL
        movl    %eax, %esi
 
        movl    (%esp), %eax
        call    __pthread_disable_asynccancel
+.LcleanupEND:
 
        testl   %esi, %esi
-       je      3b
+       je      3f
        cmpl    $-EWOULDBLOCK, %esi
-       je      3b
+       jne     4f
+
+3:
+       movl    (%ebx), %eax
+5:     testl   %eax, %eax
+       je      6b
+
+       leal    -1(%eax), %edx
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jne     5b
+
+       LOCK
+       decl    NWAITERS(%ebx)
+       jmp     7b
+
+4:     LOCK
+       decl    NWAITERS(%ebx)
+
        negl    %esi
 #ifdef __PIC__
        call    __x86.get_pc_thunk.bx
 #else
-       movl    $4f, %ebx
-4:
+       movl    $8f, %ebx
+8:
 #endif
        addl    $_GLOBAL_OFFSET_TABLE_, %ebx
 #if USE___THREAD
@@ -115,20 +127,143 @@ __new_sem_wait:
        movl    %esi, (%eax)
 #endif
        orl     $-1, %eax
-       movl    4(%esp), %esi
-       cfi_restore(6)
-       movl    8(%esp), %ebx
-       cfi_restore(3)
-       addl    $12, %esp
-       cfi_adjust_cfa_offset(-12)
-       ret
 
-5:     /* Canceled.  */
-       movl    $0xffffffff, %gs:RESULT
-       LOCK
-       orl     $0x10, %gs:CANCELHANDLING
-       movl    %gs:CLEANUP_JMP_BUF, %eax
-       jmp     HIDDEN_JUMPTARGET (__pthread_unwind)
-       cfi_endproc
+       jmp     9b
        .size   __new_sem_wait,.-__new_sem_wait
 weak_alias(__new_sem_wait, sem_wait)
+
+
+       .type   sem_wait_cleanup,@function
+sem_wait_cleanup:
+       LOCK
+       decl    NWAITERS(%ebx)
+       movl    %eax, (%esp)
+.LcallUR:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE:
+       .size   sem_wait_cleanup,.-sem_wait_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   0xff                            # @LPStart format (omit)
+       .byte   0xff                            # @TType format (omit)
+       .byte   0x01                            # call-site format
+                                               # DW_EH_PE_uleb128
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 sem_wait_cleanup-.LSTARTCODE
+       .uleb128  0
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+       .long   .LENDCIE-.LSTARTCIE             # Length of the CIE.
+.LSTARTCIE:
+       .long   0                               # CIE ID.
+       .byte   1                               # Version number.
+#ifdef SHARED
+       .string "zPLR"                          # NUL-terminated augmentation
+                                               # string.
+#else
+       .string "zPL"                           # NUL-terminated augmentation
+                                               # string.
+#endif
+       .uleb128 1                              # Code alignment factor.
+       .sleb128 -4                             # Data alignment factor.
+       .byte   8                               # Return address register
+                                               # column.
+#ifdef SHARED
+       .uleb128 7                              # Augmentation value length.
+       .byte   0x9b                            # Personality: DW_EH_PE_pcrel
+                                               # + DW_EH_PE_sdata4
+                                               # + DW_EH_PE_indirect
+       .long   DW.ref.__gcc_personality_v0-.
+       .byte   0x1b                            # LSDA Encoding: DW_EH_PE_pcrel
+                                               # + DW_EH_PE_sdata4.
+       .byte   0x1b                            # FDE Encoding: DW_EH_PE_pcrel
+                                               # + DW_EH_PE_sdata4.
+#else
+       .uleb128 6                              # Augmentation value length.
+       .byte   0x0                             # Personality: absolute
+       .long   __gcc_personality_v0
+       .byte   0x0                             # LSDA Encoding: absolute
+#endif
+       .byte 0x0c                              # DW_CFA_def_cfa
+       .uleb128 4
+       .uleb128 4
+       .byte   0x88                            # DW_CFA_offset, column 0x10
+       .uleb128 1
+       .align 4
+.LENDCIE:
+
+       .long   .LENDFDE-.LSTARTFDE             # Length of the FDE.
+.LSTARTFDE:
+       .long   .LSTARTFDE-.LSTARTFRAME         # CIE pointer.
+#ifdef SHARED
+       .long   .LSTARTCODE-.                   # PC-relative start address
+                                               # of the code.
+#else
+       .long   .LSTARTCODE                     # Start address of the code.
+#endif
+       .long   .LENDCODE-.LSTARTCODE           # Length of the code.
+       .uleb128 4                              # Augmentation size
+#ifdef SHARED
+       .long   .LexceptSTART-.
+#else
+       .long   .LexceptSTART
+#endif
+
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpush_ebx-.LSTARTCODE
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 8
+       .byte   0x83                            # DW_CFA_offset %ebx
+        .uleb128 2
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lpush_esi-.Lpush_ebx
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 12
+       .byte   0x86                            # DW_CFA_offset %esi
+        .uleb128 3
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lsub_esp-.Lpush_esi
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 16
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Ladd_esp-.Lsub_esp
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 4
+       .byte   0xc3                            # DW_CFA_restore %ebx
+       .byte   0xc6                            # DW_CFA_restore %esi
+       .byte   4                               # DW_CFA_advance_loc4
+       .long   .Lafter_ret-.Ladd_esp
+       .byte   14                              # DW_CFA_def_cfa_offset
+       .uleb128 16
+       .byte   0x83                            # DW_CFA_offset %ebx
+        .uleb128 2
+       .byte   0x86                            # DW_CFA_offset %esi
+        .uleb128 3
+       .align  4
+.LENDFDE:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  4
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+       .long   __gcc_personality_v0
+#endif
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..f768e16
--- /dev/null
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "../i486/lowlevelrobustlock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..f768e16
--- /dev/null
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "../i486/lowlevelrobustlock.S"
index 97f3b09..55add8b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #ifndef _LOWLEVELLOCK_H
 #define _LOWLEVELLOCK_H        1
 
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <atomic.h>
-#include <sysdep.h>
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
-/* Initializers for lock.  */
-#define LLL_LOCK_INITIALIZER           (0)
-#define LLL_LOCK_INITIALIZER_LOCKED    (1)
-
-#include <tls.h>
-
-#ifndef LOCK_INSTR
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+#  ifdef UP
+#   define LOCK_INSTR  /* nothing */
+#  else
+#   define LOCK_INSTR "lock;"
+#  endif
+# endif
+#else
+# ifndef LOCK
+#  ifdef UP
+#   define LOCK
+#  else
+#   define LOCK lock
+#  endif
+# endif
 #endif
 
 #define FUTEX_WAIT             0
 #define FUTEX_WAKE             1
+#define FUTEX_CMP_REQUEUE      4
+#define FUTEX_WAKE_OP          5
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_WAIT_REQUEUE_PI  11
+#define FUTEX_CMP_REQUEUE_PI   12
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG);                \
+       __asm__ ("andl %%gs:%P1, %0" : "+r" (__fl)                                    \
+            : "i" (offsetof (struct pthread, header.private_futex)));        \
+       __fl | (fl); }))
+# endif
+#endif
 
+#ifndef __ASSEMBLER__
 
 /* Initializer for compatibility lock.  */
-#define LLL_MUTEX_LOCK_INITIALIZER             (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED      (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS     (2)
+#define LLL_LOCK_INITIALIZER           (0)
+#define LLL_LOCK_INITIALIZER_LOCKED    (1)
+#define LLL_LOCK_INITIALIZER_WAITERS   (2)
 
 
 #ifdef __PIC__
@@ -60,247 +111,436 @@ typedef int lll_lock_t;
 # define LLL_EBX_REG   "b"
 #endif
 
-#define LLL_ENTER_KERNEL       "int $0x80\n\t"
+#ifdef I386_USE_SYSENTER
+# ifdef SHARED
+#  define LLL_ENTER_KERNEL     "call *%%gs:%P6\n\t"
+# else
+#  define LLL_ENTER_KERNEL     "call *_dl_sysinfo\n\t"
+# endif
+#else
+# define LLL_ENTER_KERNEL      "int $0x80\n\t"
+#endif
 
 /* Delay in spinlock loop.  */
-#define BUSY_WAIT_NOP          __asm__ ("rep; nop")
-
-#define lll_futex_wait(futex, val) \
-  lll_futex_timed_wait (futex, val, NULL)
-
-#define lll_futex_timed_wait(futex, val, timeout) \
+#define BUSY_WAIT_NOP  __asm__ ("rep; nop")
+
+
+#define LLL_STUB_UNWIND_INFO_START \
+       ".section       .eh_frame,\"a\",@progbits\n"            \
+"5:\t" ".long  7f-6f   # Length of Common Information Entry\n" \
+"6:\t" ".long  0x0     # CIE Identifier Tag\n\t"               \
+       ".byte  0x1     # CIE Version\n\t"                      \
+       ".ascii \"zR\\0\"       # CIE Augmentation\n\t"         \
+       ".uleb128 0x1   # CIE Code Alignment Factor\n\t"        \
+       ".sleb128 -4    # CIE Data Alignment Factor\n\t"        \
+       ".byte  0x8     # CIE RA Column\n\t"                    \
+       ".uleb128 0x1   # Augmentation size\n\t"                \
+       ".byte  0x1b    # FDE Encoding (pcrel sdata4)\n\t"      \
+       ".byte  0xc     # DW_CFA_def_cfa\n\t"                   \
+       ".uleb128 0x4\n\t"                                      \
+       ".uleb128 0x0\n\t"                                      \
+       ".align 4\n"                                            \
+"7:\t" ".long  17f-8f  # FDE Length\n"                         \
+"8:\t" ".long  8b-5b   # FDE CIE offset\n\t"                   \
+       ".long  1b-.    # FDE initial location\n\t"             \
+       ".long  4b-1b   # FDE address range\n\t"                \
+       ".uleb128 0x0   # Augmentation size\n\t"                \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x8\n\t"                                      \
+       ".uleb128 10f-9f\n"                                     \
+"9:\t" ".byte  0x78    # DW_OP_breg8\n\t"                      \
+       ".sleb128 3b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x8\n\t"                                      \
+       ".uleb128 12f-11f\n"                                    \
+"11:\t"        ".byte  0x78    # DW_OP_breg8\n\t"                      \
+       ".sleb128 3b-2b\n"                                      \
+"12:\t"        ".byte  0x40 + (3b-2b-1) # DW_CFA_advance_loc\n\t"      \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x8\n\t"                                      \
+       ".uleb128 16f-13f\n"                                    \
+"13:\t"        ".byte  0x78    # DW_OP_breg8\n\t"                      \
+       ".sleb128 15f-14f\n\t"                                  \
+       ".byte  0x0d    # DW_OP_const4s\n"                      \
+"14:\t"        ".4byte 3b-.\n\t"                                       \
+       ".byte  0x1c    # DW_OP_minus\n\t"                      \
+       ".byte  0x0d    # DW_OP_const4s\n"                      \
+"15:\t"        ".4byte 18f-.\n\t"                                      \
+       ".byte  0x22    # DW_OP_plus\n"                         \
+"16:\t"        ".align 4\n"                                            \
+"17:\t"        ".previous\n"
+
+/* Unwind info for
+   1: lea ..., ...
+   2: call ...
+   3: jmp 18f
+   4:
+   snippet.  */
+#define LLL_STUB_UNWIND_INFO_3 \
+LLL_STUB_UNWIND_INFO_START                                     \
+"10:\t"        ".byte  0x40 + (2b-1b) # DW_CFA_advance_loc\n\t"        \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+   1: lea ..., ...
+   0: movl ..., ...
+   2: call ...
+   3: jmp 18f
+   4:
+   snippet.  */
+#define LLL_STUB_UNWIND_INFO_4 \
+LLL_STUB_UNWIND_INFO_START                                     \
+"10:\t"        ".byte  0x40 + (0b-1b) # DW_CFA_advance_loc\n\t"        \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x8\n\t"                                      \
+       ".uleb128 20f-19f\n"                                    \
+"19:\t"        ".byte  0x78    # DW_OP_breg8\n\t"                      \
+       ".sleb128 3b-0b\n"                                      \
+"20:\t"        ".byte  0x40 + (2b-0b) # DW_CFA_advance_loc\n\t"        \
+LLL_STUB_UNWIND_INFO_END
+
+
+#define lll_futex_wait(futex, val, private) \
+  lll_futex_timed_wait (futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
   ({                                                                         \
-    int __ret;                                                       \
-    register __typeof (val) _val __asm__ ("edx") = (val);                    \
+    int __status;                                                            \
+    register __typeof (val) _val __asm__ ("edx") = (val);                            \
     __asm__ __volatile (LLL_EBX_LOAD                                         \
                      LLL_ENTER_KERNEL                                        \
                      LLL_EBX_LOAD                                            \
-                     : "=a" (__ret)                                          \
+                     : "=a" (__status)                                       \
                      : "0" (SYS_futex), LLL_EBX_REG (futex), "S" (timeout),  \
-                       "c" (FUTEX_WAIT), "d" (_val),                         \
-                       "i" (offsetof (tcbhead_t, sysinfo)));                 \
-   __ret; })
+                       "c" (__lll_private_flag (FUTEX_WAIT, private)),       \
+                       "d" (_val), "i" (offsetof (tcbhead_t, sysinfo))       \
+                     : "memory");                                            \
+    __status;                                                                \
+  })
 
 
-#define lll_futex_wake(futex, nr) \
-  ({                                                                         \
-    int __ret;                                                       \
+#define lll_futex_wake(futex, nr, private) \
+  do {                                                                       \
+    int __ignore;                                                            \
     register __typeof (nr) _nr __asm__ ("edx") = (nr);                       \
     __asm__ __volatile (LLL_EBX_LOAD                                         \
                      LLL_ENTER_KERNEL                                        \
                      LLL_EBX_LOAD                                            \
-                     : "=a" (__ret)                                          \
+                     : "=a" (__ignore)                                       \
                      : "0" (SYS_futex), LLL_EBX_REG (futex),                 \
-                       "c" (FUTEX_WAKE), "d" (_nr),                          \
+                       "c" (__lll_private_flag (FUTEX_WAKE, private)),       \
+                       "d" (_nr),                                            \
                        "i" (0) /* phony, to align next arg's number */,      \
                        "i" (offsetof (tcbhead_t, sysinfo)));                 \
-   __ret; })
-
-
-/* Does not preserve %eax and %ecx.  */
-extern int __lll_mutex_lock_wait (int val, int *__futex)
-     __attribute ((regparm (2))) attribute_hidden;
-/* Does not preserve %eax, %ecx, and %edx.  */
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
-                                      const struct timespec *abstime)
-     __attribute ((regparm (3))) attribute_hidden;
-/* Preserves all registers but %eax.  */
-extern int __lll_mutex_unlock_wake (int *__futex)
-     __attribute ((regparm (1))) attribute_hidden;
+  } while (0)
 
 
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
    value is zero.  In case the operation failed, the cmpxchg instruction
    has loaded the current value of the memory work which is guaranteed
    to be nonzero.  */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
+                          "je 0f\n\t"                                        \
+                          "lock\n"                                           \
+                          "0:\tcmpxchgl %2, %1"
+#endif
+
+#define lll_trylock(futex) \
+  ({ int ret;                                                                \
+     __asm__ __volatile (__lll_trylock_asm                                   \
+                      : "=a" (ret), "=m" (futex)                             \
+                      : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
+                        "0" (LLL_LOCK_INITIALIZER),                          \
+                        "i" (MULTIPLE_THREADS_OFFSET)                        \
+                      : "memory");                                           \
+     ret; })
+
+#define lll_robust_trylock(futex, id) \
   ({ int ret;                                                                \
      __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"                        \
                       : "=a" (ret), "=m" (futex)                             \
-                      : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-                        "0" (LLL_MUTEX_LOCK_INITIALIZER)                     \
+                      : "r" (id), "m" (futex),                               \
+                        "0" (LLL_LOCK_INITIALIZER)                           \
                       : "memory");                                           \
      ret; })
 
 
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
   ({ int ret;                                                                \
      __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"                        \
                       : "=a" (ret), "=m" (futex)                             \
-                      : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS),            \
-                         "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER)       \
+                      : "r" (LLL_LOCK_INITIALIZER_WAITERS),                  \
+                        "m" (futex), "0" (LLL_LOCK_INITIALIZER)              \
                       : "memory");                                           \
      ret; })
 
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t"                        \
+                             "je 0f\n\t"                                     \
+                             "lock\n"                                        \
+                             "0:\tcmpxchgl %1, %2\n\t"
+#endif
 
-#define lll_mutex_lock(futex) \
-  (void) ({ int ignore1, ignore2;                                            \
-           __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"              \
-                             "jnz _L_mutex_lock_%=\n\t"                      \
-                             ".subsection 1\n\t"                             \
-                             ".type _L_mutex_lock_%=,@function\n"            \
-                             "_L_mutex_lock_%=:\n\t"                         \
-                             "leal %2, %%ecx\n\t"                            \
-                             "call __lll_mutex_lock_wait\n\t"                \
-                             "jmp 1f\n\t"                                    \
-                             ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n"   \
-                             ".previous\n"                                   \
-                             "1:"                                            \
-                             : "=a" (ignore1), "=c" (ignore2), "=m" (futex)  \
-                             : "0" (0), "1" (1), "m" (futex)                 \
-                             : "memory"); })
-
-
-/* Special version of lll_mutex_lock which causes the unlock function to
-   always wakeup waiters.  */
-#define lll_mutex_cond_lock(futex) \
-  (void) ({ int ignore1, ignore2;                                            \
-           __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"              \
-                             "jnz _L_mutex_cond_lock_%=\n\t"                 \
-                             ".subsection 1\n\t"                             \
-                             ".type _L_mutex_cond_lock_%=,@function\n"       \
-                             "_L_mutex_cond_lock_%=:\n\t"                    \
-                             "leal %2, %%ecx\n\t"                            \
-                             "call __lll_mutex_lock_wait\n\t"                \
-                             "jmp 1f\n\t"                                    \
-                             ".size _L_mutex_cond_lock_%=,.-_L_mutex_cond_lock_%=\n"   \
-                             ".previous\n"                                   \
-                             "1:"                                            \
-                             : "=a" (ignore1), "=c" (ignore2), "=m" (futex)  \
-                             : "0" (0), "1" (2), "m" (futex)                 \
-                             : "memory"); })
-
-
-#define lll_mutex_timedlock(futex, timeout) \
-  ({ int _result, ignore1, ignore2;                                          \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"                    \
-                      "jnz _L_mutex_timedlock_%=\n\t"                        \
+#define lll_lock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore1, ignore2;                                                 \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+        __asm__ __volatile (__lll_lock_asm_start                                     \
+                          "jnz _L_lock_%=\n\t"                               \
+                          ".subsection 1\n\t"                                \
+                          ".type _L_lock_%=,@function\n"                     \
+                          "_L_lock_%=:\n"                                    \
+                          "1:\tleal %2, %%ecx\n"                             \
+                          "2:\tcall __lll_lock_wait_private\n"               \
+                          "3:\tjmp 18f\n"                                    \
+                          "4:\t.size _L_lock_%=, 4b-1b\n\t"                  \
+                          ".previous\n"                                      \
+                          LLL_STUB_UNWIND_INFO_3                             \
+                          "18:"                                              \
+                          : "=a" (ignore1), "=c" (ignore2), "=m" (futex)     \
+                          : "0" (0), "1" (1), "m" (futex),                   \
+                            "i" (MULTIPLE_THREADS_OFFSET)                    \
+                          : "memory");                                       \
+       else                                                                  \
+        {                                                                    \
+          int ignore3;                                                       \
+          __asm__ __volatile (__lll_lock_asm_start                           \
+                            "jnz _L_lock_%=\n\t"                             \
+                            ".subsection 1\n\t"                              \
+                            ".type _L_lock_%=,@function\n"                   \
+                            "_L_lock_%=:\n"                                  \
+                            "1:\tleal %2, %%edx\n"                           \
+                            "0:\tmovl %8, %%ecx\n"                           \
+                            "2:\tcall __lll_lock_wait\n"                     \
+                            "3:\tjmp 18f\n"                                  \
+                            "4:\t.size _L_lock_%=, 4b-1b\n\t"                \
+                            ".previous\n"                                    \
+                            LLL_STUB_UNWIND_INFO_4                           \
+                            "18:"                                            \
+                            : "=a" (ignore1), "=c" (ignore2),                \
+                              "=m" (futex), "=&d" (ignore3)                  \
+                            : "1" (1), "m" (futex),                          \
+                              "i" (MULTIPLE_THREADS_OFFSET), "0" (0),        \
+                              "g" ((int) (private))                          \
+                            : "memory");                                     \
+        }                                                                    \
+    })
+
+#define lll_robust_lock(futex, id, private) \
+  ({ int __result, ignore1, ignore2;                                         \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"                            \
+                      "jnz _L_robust_lock_%=\n\t"                            \
                       ".subsection 1\n\t"                                    \
-                      ".type _L_mutex_timedlock_%=,@function\n"              \
-                      "_L_mutex_timedlock_%=:\n\t"                           \
-                      "leal %3, %%ecx\n\t"                                   \
-                      "movl %7, %%edx\n\t"                                   \
-                      "call __lll_mutex_timedlock_wait\n\t"                  \
-                      "jmp 1f\n\t"                                           \
-                      ".size _L_mutex_timedlock_%=,.-_L_mutex_timedlock_%=\n"\
+                      ".type _L_robust_lock_%=,@function\n"                  \
+                      "_L_robust_lock_%=:\n"                                 \
+                      "1:\tleal %2, %%edx\n"                                 \
+                      "0:\tmovl %7, %%ecx\n"                                 \
+                      "2:\tcall __lll_robust_lock_wait\n"                    \
+                      "3:\tjmp 18f\n"                                        \
+                      "4:\t.size _L_robust_lock_%=, 4b-1b\n\t"               \
                       ".previous\n"                                          \
-                      "1:"                                                   \
-                      : "=a" (_result), "=c" (ignore1), "=&d" (ignore2),      \
-                        "=m" (futex)                                         \
-                      : "0" (0), "1" (1), "m" (futex), "m" (timeout)         \
+                      LLL_STUB_UNWIND_INFO_4                                 \
+                      "18:"                                                  \
+                      : "=a" (__result), "=c" (ignore1), "=m" (futex),       \
+                        "=&d" (ignore2)                                      \
+                      : "0" (0), "1" (id), "m" (futex), "g" ((int) (private))\
                       : "memory");                                           \
-     _result; })
-
-
-#define lll_mutex_unlock(futex) \
-  (void) ({ int ignore;                                                              \
-            __asm__ __volatile (LOCK_INSTR "subl $1,%0\n\t"                  \
-                             "jne _L_mutex_unlock_%=\n\t"                    \
-                             ".subsection 1\n\t"                             \
-                             ".type _L_mutex_unlock_%=,@function\n"          \
-                             "_L_mutex_unlock_%=:\n\t"                       \
-                             "leal %0, %%eax\n\t"                            \
-                             "call __lll_mutex_unlock_wake\n\t"              \
-                             "jmp 1f\n\t"                                    \
-                             ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
-                             ".previous\n"                                   \
-                             "1:"                                            \
-                             : "=m" (futex), "=&a" (ignore)                  \
-                             : "m" (futex)                                   \
-                             : "memory"); })
-
-
-#define lll_mutex_islocked(futex) \
-  (futex != 0)
-
-
-extern int __lll_lock_wait (int val, int *__futex)
-     __attribute ((regparm (2))) attribute_hidden;
-extern int __lll_unlock_wake (int *__futex)
-     __attribute ((regparm (1))) attribute_hidden;
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
+     __result; })
 
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-    2  -  taken by more users */
 
+/* Special version of lll_lock which causes the unlock function to
+   always wakeup waiters.  */
+#define lll_cond_lock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore1, ignore2, ignore3;                                        \
+       __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"                  \
+                        "jnz _L_cond_lock_%=\n\t"                            \
+                        ".subsection 1\n\t"                                  \
+                        ".type _L_cond_lock_%=,@function\n"                  \
+                        "_L_cond_lock_%=:\n"                                 \
+                        "1:\tleal %2, %%edx\n"                               \
+                        "0:\tmovl %7, %%ecx\n"                               \
+                        "2:\tcall __lll_lock_wait\n"                         \
+                        "3:\tjmp 18f\n"                                      \
+                        "4:\t.size _L_cond_lock_%=, 4b-1b\n\t"               \
+                        ".previous\n"                                        \
+                        LLL_STUB_UNWIND_INFO_4                               \
+                        "18:"                                                \
+                        : "=a" (ignore1), "=c" (ignore2), "=m" (futex),      \
+                          "=&d" (ignore3)                                    \
+                        : "0" (0), "1" (2), "m" (futex), "g" ((int) (private))\
+                        : "memory");                                         \
+    })
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+  ({ int __result, ignore1, ignore2;                                         \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"                            \
+                      "jnz _L_robust_cond_lock_%=\n\t"                       \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_robust_cond_lock_%=,@function\n"             \
+                      "_L_robust_cond_lock_%=:\n"                            \
+                      "1:\tleal %2, %%edx\n"                                 \
+                      "0:\tmovl %7, %%ecx\n"                                 \
+                      "2:\tcall __lll_robust_lock_wait\n"                    \
+                      "3:\tjmp 18f\n"                                        \
+                      "4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t"          \
+                      ".previous\n"                                          \
+                      LLL_STUB_UNWIND_INFO_4                                 \
+                      "18:"                                                  \
+                      : "=a" (__result), "=c" (ignore1), "=m" (futex),       \
+                        "=&d" (ignore2)                                      \
+                      : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex),      \
+                        "g" ((int) (private))                                \
+                      : "memory");                                           \
+     __result; })
 
-#if defined NOT_IN_libc
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
-#else
-/* Special versions of the macros for use in libc itself.  They avoid
-   the lock prefix when the thread library is not used. */
 
+#define lll_timedlock(futex, timeout, private) \
+  ({ int __result, ignore1, ignore2, ignore3;                                \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"                            \
+                      "jnz _L_timedlock_%=\n\t"                              \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_timedlock_%=,@function\n"                    \
+                      "_L_timedlock_%=:\n"                                   \
+                      "1:\tleal %3, %%ecx\n"                                 \
+                      "0:\tmovl %8, %%edx\n"                                 \
+                      "2:\tcall __lll_timedlock_wait\n"                      \
+                      "3:\tjmp 18f\n"                                        \
+                      "4:\t.size _L_timedlock_%=, 4b-1b\n\t"                 \
+                      ".previous\n"                                          \
+                      LLL_STUB_UNWIND_INFO_4                                 \
+                      "18:"                                                  \
+                      : "=a" (__result), "=c" (ignore1), "=&d" (ignore2),      \
+                        "=m" (futex), "=S" (ignore3)                         \
+                      : "0" (0), "1" (1), "m" (futex), "m" (timeout),        \
+                        "4" ((int) (private))                                \
+                      : "memory");                                           \
+     __result; })
 
 
-# define lll_trylock(futex) \
-  ({ unsigned char ret;                                                              \
-     __asm__ __volatile ("cmpl $0, %%gs:%P5\n\t"                             \
-                      "je,pt 0f\n\t"                                         \
-                      "lock\n"                                               \
-                      "0:\tcmpxchgl %2, %1; setne %0"                        \
-                      : "=a" (ret), "=m" (futex)                             \
-                      : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-                        "0" (LLL_MUTEX_LOCK_INITIALIZER),                    \
-                        "i" (offsetof (tcbhead_t, multiple_threads))         \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+  ({ int __result, ignore1, ignore2, ignore3;                                \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"                            \
+                      "jnz _L_robust_timedlock_%=\n\t"                       \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_robust_timedlock_%=,@function\n"             \
+                      "_L_robust_timedlock_%=:\n"                            \
+                      "1:\tleal %3, %%ecx\n"                                 \
+                      "0:\tmovl %8, %%edx\n"                                 \
+                      "2:\tcall __lll_robust_timedlock_wait\n"               \
+                      "3:\tjmp 18f\n"                                        \
+                      "4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t"          \
+                      ".previous\n"                                          \
+                      LLL_STUB_UNWIND_INFO_4                                 \
+                      "18:"                                                  \
+                      : "=a" (__result), "=c" (ignore1), "=&d" (ignore2),      \
+                        "=m" (futex), "=S" (ignore3)                         \
+                      : "0" (0), "1" (id), "m" (futex), "m" (timeout),       \
+                        "4" ((int) (private))                                \
                       : "memory");                                           \
-     ret; })
+     __result; })
 
-
-# define lll_lock(futex) \
-  (void) ({ int ignore1, ignore2;                                            \
-           __asm__ __volatile ("cmpl $0, %%gs:%P6\n\t"                       \
-                             "je,pt 0f\n\t"                                  \
-                             "lock\n"                                        \
-                             "0:\tcmpxchgl %1, %2\n\t"                       \
-                             "jnz _L_mutex_lock_%=\n\t"                      \
-                             ".subsection 1\n\t"                             \
-                             ".type _L_mutex_lock_%=,@function\n"            \
-                             "_L_mutex_lock_%=:\n\t"                         \
-                             "leal %2, %%ecx\n\t"                            \
-                             "call __lll_mutex_lock_wait\n\t"                \
-                             "jmp 1f\n\t"                                    \
-                             ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n"   \
-                             ".previous\n"                                   \
-                             "1:"                                            \
-                             : "=a" (ignore1), "=c" (ignore2), "=m" (futex)  \
-                             : "0" (0), "1" (1), "m" (futex),                \
-                               "i" (offsetof (tcbhead_t, multiple_threads))  \
-                             : "memory"); })
-
-
-# define lll_unlock(futex) \
-  (void) ({ int ignore;                                                              \
-            __asm__ __volatile ("cmpl $0, %%gs:%P3\n\t"                              \
-                             "je,pt 0f\n\t"                                  \
-                             "lock\n"                                        \
-                             "0:\tsubl $1,%0\n\t"                    \
-                             "jne _L_mutex_unlock_%=\n\t"                    \
-                             ".subsection 1\n\t"                             \
-                             ".type _L_mutex_unlock_%=,@function\n"          \
-                             "_L_mutex_unlock_%=:\n\t"                       \
-                             "leal %0, %%eax\n\t"                            \
-                             "call __lll_mutex_unlock_wake\n\t"              \
-                             "jmp 1f\n\t"                                    \
-                             ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
-                             ".previous\n"                                   \
-                             "1:"                                            \
-                             : "=m" (futex), "=&a" (ignore)                  \
-                             : "m" (futex),                                  \
-                               "i" (offsetof (tcbhead_t, multiple_threads))  \
-                             : "memory"); })
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
+#else
+# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t"                            \
+                         "je 0f\n\t"                                         \
+                         "lock\n"                                            \
+                         "0:\tsubl $1,%0\n\t"
 #endif
 
+#define lll_unlock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore;                                                           \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+        __asm__ __volatile (__lll_unlock_asm                                 \
+                          "jne _L_unlock_%=\n\t"                             \
+                          ".subsection 1\n\t"                                \
+                          ".type _L_unlock_%=,@function\n"                   \
+                          "_L_unlock_%=:\n"                                  \
+                          "1:\tleal %0, %%eax\n"                             \
+                          "2:\tcall __lll_unlock_wake_private\n"             \
+                          "3:\tjmp 18f\n"                                    \
+                          "4:\t.size _L_unlock_%=, 4b-1b\n\t"                \
+                          ".previous\n"                                      \
+                          LLL_STUB_UNWIND_INFO_3                             \
+                          "18:"                                              \
+                          : "=m" (futex), "=&a" (ignore)                     \
+                          : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET)       \
+                          : "memory");                                       \
+       else                                                                  \
+        {                                                                    \
+          int ignore2;                                                       \
+          __asm__ __volatile (__lll_unlock_asm                               \
+                            "jne _L_unlock_%=\n\t"                           \
+                            ".subsection 1\n\t"                              \
+                            ".type _L_unlock_%=,@function\n"                 \
+                            "_L_unlock_%=:\n"                                \
+                            "1:\tleal %0, %%eax\n"                           \
+                            "0:\tmovl %5, %%ecx\n"                           \
+                            "2:\tcall __lll_unlock_wake\n"                   \
+                            "3:\tjmp 18f\n"                                  \
+                            "4:\t.size _L_unlock_%=, 4b-1b\n\t"              \
+                            ".previous\n"                                    \
+                            LLL_STUB_UNWIND_INFO_4                           \
+                            "18:"                                            \
+                            : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)  \
+                            : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex),    \
+                              "g" ((int) (private))                          \
+                            : "memory");                                     \
+        }                                                                    \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore, ignore2;                                                  \
+       __asm__ __volatile (LOCK_INSTR "andl %3, %0\n\t"                              \
+                        "jne _L_robust_unlock_%=\n\t"                        \
+                        ".subsection 1\n\t"                                  \
+                        ".type _L_robust_unlock_%=,@function\n"              \
+                        "_L_robust_unlock_%=:\n\t"                           \
+                        "1:\tleal %0, %%eax\n"                               \
+                        "0:\tmovl %5, %%ecx\n"                               \
+                        "2:\tcall __lll_unlock_wake\n"                       \
+                        "3:\tjmp 18f\n"                                      \
+                        "4:\t.size _L_robust_unlock_%=, 4b-1b\n\t"           \
+                        ".previous\n"                                        \
+                        LLL_STUB_UNWIND_INFO_4                               \
+                        "18:"                                                \
+                        : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)      \
+                        : "i" (FUTEX_WAITERS), "m" (futex),                  \
+                          "g" ((int) (private))                              \
+                        : "memory");                                         \
+    })
+
+
+#define lll_robust_dead(futex, private) \
+  (void)                                                                     \
+    ({ int __ignore;                                                         \
+       register int _nr __asm__ ("edx") = 1;                                 \
+       __asm__ __volatile (LOCK_INSTR "orl %5, (%2)\n\t"                             \
+                        LLL_EBX_LOAD                                         \
+                        LLL_ENTER_KERNEL                                     \
+                        LLL_EBX_LOAD                                         \
+                        : "=a" (__ignore)                                    \
+                        : "0" (SYS_futex), LLL_EBX_REG (&(futex)),           \
+                          "c" (__lll_private_flag (FUTEX_WAKE, private)),    \
+                          "d" (_nr), "i" (FUTEX_OWNER_DIED),                 \
+                          "i" (offsetof (tcbhead_t, sysinfo)));              \
+    })
 
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
-
 /* The kernel notifies a process with uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -308,21 +548,22 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
 
    The macro parameter must not have any side effect.  */
 #define lll_wait_tid(tid) \
-  ({                                                                         \
-    int __ret;                                                               \
-    register __typeof (tid) _tid __asm__ ("edx") = (tid);                    \
+  do {                                                                       \
+    int __ignore;                                                            \
+    register __typeof (tid) _tid __asm__ ("edx") = (tid);                            \
     if (_tid != 0)                                                           \
       __asm__ __volatile (LLL_EBX_LOAD                                       \
                        "1:\tmovl %1, %%eax\n\t"                              \
                        LLL_ENTER_KERNEL                                      \
                        "cmpl $0, (%%ebx)\n\t"                                \
-                       "jne,pn 1b\n\t"                                       \
+                       "jne 1b\n\t"                                          \
                        LLL_EBX_LOAD                                          \
-                       : "=&a" (__ret)                                       \
+                       : "=&a" (__ignore)                                    \
                        : "i" (SYS_futex), LLL_EBX_REG (&tid), "S" (0),       \
                          "c" (FUTEX_WAIT), "d" (_tid),                       \
-                         "i" (offsetof (tcbhead_t, sysinfo)));               \
-   __ret; })
+                         "i" (offsetof (tcbhead_t, sysinfo))                 \
+                       : "memory");                                          \
+  } while (0)
 
 extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
      __attribute__ ((regparm (2))) attribute_hidden;
@@ -338,28 +579,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
       }                                                                              \
     __result; })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     __attribute ((regparm (1))) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     __attribute ((regparm (2))) attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     __attribute ((regparm (1))) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     __attribute ((regparm (1))) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
+#endif  /* !__ASSEMBLER__ */
 
 #endif /* lowlevellock.h */
index 5bdba3f..6557359 100644 (file)
@@ -1,5 +1,5 @@
 /* Uncancelable versions of cancelable interfaces.  Linux/NPTL version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -26,20 +26,21 @@ extern int __close_nocancel (int) attribute_hidden;
 extern int __read_nocancel (int, void *, size_t) attribute_hidden;
 extern int __write_nocancel (int, const void *, size_t) attribute_hidden;
 extern pid_t __waitpid_nocancel (pid_t, int *, int) attribute_hidden;
-
-libc_hidden_proto(__open_nocancel)
-libc_hidden_proto(__close_nocancel)
-libc_hidden_proto(__read_nocancel)
-libc_hidden_proto(__write_nocancel)
-libc_hidden_proto(__waitpid_nocancel)
-
+extern int __openat_nocancel (int fd, const char *fname, int oflag,
+                               mode_t mode) attribute_hidden;
+extern int __openat64_nocancel (int fd, const char *fname, int oflag,
+                                 mode_t mode) attribute_hidden;
 #else
-#define __open_nocancel(name, ...) open (name, __VA_ARGS__)
-#define __close_nocancel(fd) close (fd)
-#define __read_nocancel(fd, buf, len) read (fd, buf, len)
-#define __write_nocancel(fd, buf, len) write (fd, buf, len)
-#define __waitpid_nocancel(pid, stat_loc, options) \
-  waitpid (pid, stat_loc, options)
+# define __open_nocancel(name, ...) __open (name, __VA_ARGS__)
+# define __close_nocancel(fd) __close (fd)
+# define __read_nocancel(fd, buf, len) __read (fd, buf, len)
+# define __write_nocancel(fd, buf, len) __write (fd, buf, len)
+# define __waitpid_nocancel(pid, stat_loc, options) \
+  __waitpid (pid, stat_loc, options)
+# define __openat_nocancel(fd, fname, oflag, mode) \
+  openat (fd, fname, oflag, mode)
+# define __openat64_nocancel(fd, fname, oflag, mode) \
+  openat64 (fd, fname, oflag, mode)
 #endif
 
 /* Uncancelable open.  */
@@ -48,6 +49,16 @@ libc_hidden_proto(__waitpid_nocancel)
 #define open_not_cancel_2(name, flags) \
    __open_nocancel (name, flags)
 
+/* Uncancelable openat.  */
+#define openat_not_cancel(fd, fname, oflag, mode) \
+  __openat_nocancel (fd, fname, oflag, mode)
+#define openat_not_cancel_3(fd, fname, oflag) \
+  __openat_nocancel (fd, fname, oflag, 0)
+#define openat64_not_cancel(fd, fname, oflag, mode) \
+  __openat64_nocancel (fd, fname, oflag, mode)
+#define openat64_not_cancel_3(fd, fname, oflag) \
+  __openat64_nocancel (fd, fname, oflag, 0)
+
 /* Uncancelable close.  */
 #define close_not_cancel(fd) \
   __close_nocancel (fd)
@@ -80,3 +91,15 @@ libc_hidden_proto(__waitpid_nocancel)
 # define waitpid_not_cancel(pid, stat_loc, options) \
   INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL)
 #endif
+
+/* Uncancelable pause.  */
+#define pause_not_cancel() \
+  __pause_nocancel ()
+
+/* Uncancelable nanosleep.  */
+#define nanosleep_not_cancel(requested_time, remaining) \
+  __nanosleep_nocancel (requested_time, remaining)
+
+/* Uncancelable sigsuspend.  */
+#define sigsuspend_not_cancel(set) \
+  __sigsuspend_nocancel (set)
index 9395389..7ab222e 100644 (file)
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <sysdep.h>
+#define _ERRNO_H       1
+#include <bits/errno.h>
+#include <bits/kernel-features.h>
 #include <tcb-offsets.h>
 
 /* Save the PID value.  */
        movl    %edx, %gs:PID;                                                \
 1:
 
-#include <../../../../../../../libc/sysdeps/linux/i386/vfork.S>
+/* Clone the calling process, but without copying the whole address space.
+   The calling process is suspended until the new process exits or is
+   replaced by a call to `execve'.  Return -1 for errors, 0 to the new process,
+   and the process ID of the new process to the old process.  */
+
+ENTRY (__vfork)
+       /* Pop the return PC value into ECX.  */
+       popl    %ecx
+
+       SAVE_PID
+
+       /* Stuff the syscall number in EAX and enter into the kernel.  */
+       movl    $SYS_ify (vfork), %eax
+       int     $0x80
+
+       RESTORE_PID
+
+       /* Jump to the return PC.  Don't jump directly since this
+          disturbs the branch target cache.  Instead push the return
+          address back on the stack.  */
+       pushl   %ecx
+
+       cmpl    $-4095, %eax
+       jae     SYSCALL_ERROR_LABEL     /* Branch forward if it failed.  */
+.Lpseudo_end:
+       ret
+PSEUDO_END (__vfork)
+
+weak_alias (__vfork, vfork)
index 5ab2c58..9a3b363 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <unwindbuf.h>
 #include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define FUTEX_WAKE     1
 
        .comm   __fork_generation, 4, 4
 
@@ -89,7 +84,16 @@ __pthread_once:
        jnz     3f      /* Different for generation -> run initializer.  */
 
        /* Somebody else got here first.  Wait.  */
-       movl    %esi, %ecx              /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %ecx
+#else
+# if FUTEX_WAIT == 0
+       movl    %gs:PRIVATE_FUTEX, %ecx
+# else
+       movl    $FUTEX_WAIT, %ecx
+       orl     %gs:PRIVATE_FUTEX, %ecx
+# endif
+#endif
        movl    $SYS_futex, %eax
        ENTER_KERNEL
        jmp     6b
@@ -130,7 +134,12 @@ __pthread_once:
 
        /* Wake up all other threads.  */
        movl    $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %ecx
+#else
        movl    $FUTEX_WAKE, %ecx
+       orl     %gs:PRIVATE_FUTEX, %ecx
+#endif
        movl    $SYS_futex, %eax
        ENTER_KERNEL
 
@@ -151,7 +160,12 @@ __pthread_once:
        movl    $0, (%ebx)
 
        movl    $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %ecx
+#else
        movl    $FUTEX_WAKE, %ecx
+       orl     %gs:PRIVATE_FUTEX, %ecx
+#endif
        movl    $SYS_futex, %eax
        ENTER_KERNEL
 
index 2c0cbe9..f68a0c0 100644 (file)
@@ -1,5 +1,5 @@
-/* Determine whether the host has multiple processors.  SH version.
-   Copyright (C) 2002 Free Software Foundation, Inc.
+/* Determine whether the host has multiple processors.  Linux version.
+   Copyright (C) 1996, 2002, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <not-cancel.h>
+
+/* Test whether the machine has more than one processor.  This is not the
+   best test but good enough.  More complicated tests would require `malloc'
+   which is not available at that time.  */
 static inline int
 is_smp_system (void)
 {
-  return 0;
+  union
+  {
+    struct utsname uts;
+    char buf[512];
+  } u;
+  char *cp;
+
+  /* Try reading the number using `sysctl' first.  */
+  if (uname (&u.uts) == 0)
+    cp = u.uts.version;
+  else
+    {
+      /* This was not successful.  Now try reading the /proc filesystem.  */
+      int fd = open_not_cancel_2 ("/proc/sys/kernel/version", O_RDONLY);
+      if (__builtin_expect (fd, 0) == -1
+         || read_not_cancel (fd, u.buf, sizeof (u.buf)) <= 0)
+       /* This also didn't work.  We give up and say it's a UP machine.  */
+       u.buf[0] = '\0';
+
+      close_not_cancel_no_status (fd);
+      cp = u.buf;
+    }
+
+  return strstr (cp, "SMP") != NULL;
 }
index f32c5bd..cb8d689 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
 
@@ -58,6 +58,7 @@
 # define SAVE_OLDTYPE_3        SAVE_OLDTYPE_2
 # define SAVE_OLDTYPE_4        SAVE_OLDTYPE_2
 # define SAVE_OLDTYPE_5        SAVE_OLDTYPE_2
+# define SAVE_OLDTYPE_6        SAVE_OLDTYPE_2
 
 # define PUSHCARGS_0   /* No arguments to push.  */
 # define DOCARGS_0     /* No arguments to frob.  */
 # define _POPCARGS_5   _POPCARGS_4; popl %edi; \
                        cfi_adjust_cfa_offset (-4); cfi_restore (edi);
 
+# define PUSHCARGS_6   _PUSHCARGS_6
+# define DOCARGS_6     _DOARGS_6 (44)
+# define POPCARGS_6    _POPCARGS_6
+# define _PUSHCARGS_6  pushl %ebp; cfi_adjust_cfa_offset (4); \
+                       cfi_rel_offset (ebp, 0); _PUSHCARGS_5
+# define _POPCARGS_6   _POPCARGS_5; popl %ebp; \
+                       cfi_adjust_cfa_offset (-4); cfi_restore (ebp);
+
 # ifdef IS_IN_libpthread
 #  define CENABLE      call __pthread_enable_asynccancel;
 #  define CDISABLE     call __pthread_disable_asynccancel
 # define POPSTATE_3    POPSTATE_2
 # define POPSTATE_4    POPSTATE_3
 # define POPSTATE_5    POPSTATE_4
+# define POPSTATE_6    POPSTATE_5
 
 # ifndef __ASSEMBLER__
 #  define SINGLE_THREAD_P \
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index dc7fb2e..b39099a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 1999, 2002, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1999,2002,2004,2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
index 07ee9d7..add20b6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -21,7 +21,6 @@
 #define _INTERNALTYPES_H       1
 
 #include <stdint.h>
-#include <sched.h>
 
 
 struct pthread_attr
@@ -77,9 +76,11 @@ struct pthread_condattr
 
 
 /* The __NWAITERS field is used as a counter and to house the number
-   of bits which represent the clock.  COND_CLOCK_BITS is the number
-   of bits reserved for the clock.  */
-#define COND_CLOCK_BITS        1
+   of bits for other purposes.  COND_CLOCK_BITS is the number
+   of bits needed to represent the ID of the clock.  COND_NWAITERS_SHIFT
+   is the number of bits reserved for other purposes like the clock.  */
+#define COND_CLOCK_BITS                1
+#define COND_NWAITERS_SHIFT    1
 
 
 /* Read-write lock variable attribute data structure.  */
@@ -97,6 +98,7 @@ struct pthread_barrier
   int lock;
   unsigned int left;
   unsigned int init_count;
+  int private;
 };
 
 
@@ -138,9 +140,16 @@ struct pthread_key_struct
 
 
 /* Semaphore variable structure.  */
-struct sem
+struct new_sem
 {
-  unsigned int count;
+  unsigned int value;
+  int private;
+  unsigned long int nwaiters;
+};
+
+struct old_sem
+{
+  unsigned int value;
 };
 
 
index c435eff..f279551 100644 (file)
@@ -1,5 +1,5 @@
 /* Clean up stack frames unwound by longjmp.  Linux version.
-   Copyright (C) 1995, 1997, 2002, 2003 Free Software Foundation, Inc.
+   Copyright (C) 1995, 1997, 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -29,11 +29,11 @@ void
 _longjmp_unwind (jmp_buf env, int val)
 {
 #ifdef SHARED
-# define fptr __libc_pthread_functions.ptr___pthread_cleanup_upto
+  if (__libc_pthread_functions_init)
+    PTHFCT_CALL (ptr___pthread_cleanup_upto, (env->__jmpbuf,
+                                             CURRENT_STACK_FRAME));
 #else
-# define fptr __pthread_cleanup_upto
+  if (__pthread_cleanup_upto != NULL)
+    __pthread_cleanup_upto (env->__jmpbuf, CURRENT_STACK_FRAME);
 #endif
-
-  if (fptr != NULL)
-    fptr (env->__jmpbuf, CURRENT_STACK_FRAME);
 }
index 4ad2528..0164377 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2005,2006,2007,2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <unistd.h>
 #include <list.h>
-#include "fork.h"
+#include <fork.h>
+#include <dl-sysdep.h>
 #include <tls.h>
 #include <string.h>
 #include <pthreadP.h>
 #include <bits/libc-lock.h>
+#include <sysdep.h>
+#include <ldsodefs.h>
 
-libc_hidden_proto(memcpy)
 
 #ifdef TLS_MULTIPLE_THREADS_IN_TCB
 void
@@ -46,14 +48,40 @@ __libc_pthread_init (
   __register_atfork (NULL, NULL, reclaim, NULL);
 
 #ifdef SHARED
-  /* We copy the content of the variable pointed to by the FUNCTIONS
-     parameter to one in libc.so since this means access to the array
-     can be done with one memory access instead of two.  */
-  memcpy (&__libc_pthread_functions, functions,
-         sizeof (__libc_pthread_functions));
+  /* Copy the function pointers into an array in libc.  This enables
+     access with just one memory reference but moreso, it prevents
+     hijacking the function pointers with just one pointer change.  We
+     "encrypt" the function pointers since we cannot write-protect the
+     array easily enough.  */
+  union ptrhack
+  {
+    struct pthread_functions pf;
+# define NPTRS (sizeof (struct pthread_functions) / sizeof (void *))
+    void *parr[NPTRS];
+  } __attribute__ ((may_alias)) const *src;
+  union ptrhack *dest;
+
+  src = (const void *) functions;
+  dest = (void *) &__libc_pthread_functions;
+
+  for (size_t cnt = 0; cnt < NPTRS; ++cnt)
+    {
+      void *p = src->parr[cnt];
+      PTR_MANGLE (p);
+      dest->parr[cnt] = p;
+    }
+  __libc_pthread_functions_init = 1;
 #endif
 
 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
   return &__libc_multiple_threads;
 #endif
 }
+
+#ifdef SHARED
+libc_freeres_fn (freeres_libptread)
+{
+  if (__libc_pthread_functions_init)
+    PTHFCT_CALL (ptr_freeres, ());
+}
+#endif
index 36e28eb..cfe22b0 100644 (file)
@@ -9,3 +9,4 @@ CURR_EVENT              offsetof (struct pthread_barrier, curr_event)
 MUTEX                  offsetof (struct pthread_barrier, lock)
 LEFT                   offsetof (struct pthread_barrier, left)
 INIT_COUNT             offsetof (struct pthread_barrier, init_count)
+PRIVATE                        offsetof (struct pthread_barrier, private)
index c5e7978..18e1ada 100644 (file)
@@ -13,4 +13,4 @@ wakeup_seq    offsetof (pthread_cond_t, __data.__wakeup_seq)
 woken_seq      offsetof (pthread_cond_t, __data.__woken_seq)
 dep_mutex      offsetof (pthread_cond_t, __data.__mutex)
 broadcast_seq  offsetof (pthread_cond_t, __data.__broadcast_seq)
-clock_bits     COND_CLOCK_BITS
+nwaiters_shift COND_NWAITERS_SHIFT
index f938914..f459bcf 100644 (file)
@@ -1,5 +1,5 @@
 /* low level locking for pthread library.  Generic futex-using version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
 
 
 void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
 {
-  do
-    {
-      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
-      if (oldval != 0)
-       lll_futex_wait (futex, 2);
-    }
-  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+  if (*futex == 2)
+    lll_futex_wait (futex, 2, LLL_PRIVATE);
+
+  while (atomic_exchange_acq (futex, 2) != 0)
+    lll_futex_wait (futex, 2, LLL_PRIVATE);
+}
+
+
+/* These functions don't get included in libc.so  */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+  if (*futex == 2)
+    lll_futex_wait (futex, 2, private);
+
+  while (atomic_exchange_acq (futex, 2) != 0)
+    lll_futex_wait (futex, 2, private);
 }
 
 
 int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
 {
   /* Reject invalid timeouts.  */
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     return EINVAL;
 
-  do
+  /* Try locking.  */
+  while (atomic_exchange_acq (futex, 2) != 0)
     {
       struct timeval tv;
-      struct timespec rt;
 
       /* Get the current time.  */
       (void) gettimeofday (&tv, NULL);
 
       /* Compute relative timeout.  */
+      struct timespec rt;
       rt.tv_sec = abstime->tv_sec - tv.tv_sec;
       rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
       if (rt.tv_nsec < 0)
@@ -61,30 +73,12 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
          --rt.tv_sec;
        }
 
-      /* Already timed out?  */
       if (rt.tv_sec < 0)
        return ETIMEDOUT;
 
       /* Wait.  */
-      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
-      if (oldval != 0)
-       lll_futex_timed_wait (futex, 2, &rt);
+      lll_futex_timed_wait (futex, 2, &rt, private);
     }
-  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
-
-  return 0;
-}
-
-
-/* These don't get included in libc.so  */
-#ifdef IS_IN_libpthread
-int
-lll_unlock_wake_cb (int *futex)
-{
-  int val = atomic_exchange_rel (futex, 0);
-
-  if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1);
 
   return 0;
 }
@@ -105,7 +99,7 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
       struct timespec rt;
 
       /* Get the current time.  */
-      (void) gettimeofday (&tv, NULL);
+      (void) __gettimeofday (&tv, NULL);
 
       /* Compute relative timeout.  */
       rt.tv_sec = abstime->tv_sec - tv.tv_sec;
@@ -120,12 +114,12 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
       if (rt.tv_sec < 0)
        return ETIMEDOUT;
 
-      /* Wait until thread terminates.  */
-      if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT)
+      /* Wait until thread terminates.  The kernel so far does not use
+        the private futex operations for this.  */
+      if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
        return ETIMEDOUT;
     }
 
   return 0;
 }
-
 #endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym
new file mode 100644 (file)
index 0000000..2f1e9da
--- /dev/null
@@ -0,0 +1,6 @@
+#include <stddef.h>
+#include <pthreadP.h>
+
+--
+
+TID            offsetof (struct pthread, tid)
index e82c878..f50b25b 100644 (file)
@@ -1,6 +1,7 @@
 #include <stddef.h>
 #include <stdio.h>
 #include <bits/pthreadtypes.h>
+#include <bits/wordsize.h>
 
 --
 
@@ -12,3 +13,4 @@ READERS_QUEUED        offsetof (pthread_rwlock_t, __data.__nr_readers_queued)
 WRITERS_QUEUED offsetof (pthread_rwlock_t, __data.__nr_writers_queued)
 FLAGS          offsetof (pthread_rwlock_t, __data.__flags)
 WRITER         offsetof (pthread_rwlock_t, __data.__writer)
+PSHARED                offsetof (pthread_rwlock_t, __data.__shared)
index f112b8a..166a6c6 100644 (file)
@@ -1,5 +1,5 @@
 /* Machine-specific pthread type layouts.  MIPS version.
-   Copyright (C) 2005 Free Software Foundation, Inc.
+   Copyright (C) 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,7 +20,7 @@
 #ifndef _BITS_PTHREADTYPES_H
 #define _BITS_PTHREADTYPES_H   1
 
-#include <sgidefs.h>
+#include <endian.h>
 
 #if _MIPS_SIM == _ABI64
 # define __SIZEOF_PTHREAD_ATTR_T 56
@@ -56,6 +56,7 @@ typedef union
   long int __align;
 } pthread_attr_t;
 
+
 #if _MIPS_SIM == _ABI64
 typedef struct __pthread_internal_list
 {
@@ -69,6 +70,7 @@ typedef struct __pthread_internal_slist
 } __pthread_slist_t;
 #endif
 
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is deliberately not exposed.  */
 typedef union
@@ -87,7 +89,7 @@ typedef union
 #if _MIPS_SIM == _ABI64
     int __spins;
     __pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV      1
+# define __PTHREAD_MUTEX_HAVE_PREV     1
 #else
     unsigned int __nusers;
     __extension__ union
@@ -157,9 +159,9 @@ typedef union
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
     int __writer;
-    int __pad1;
+    int __shared;
+    unsigned long int __pad1;
     unsigned long int __pad2;
-    unsigned long int __pad3;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
     unsigned int __flags;
@@ -173,9 +175,21 @@ typedef union
     unsigned int __writer_wakeup;
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+    unsigned char __pad1;
+    unsigned char __pad2;
+    unsigned char __shared;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
+#else
+    /* FLAGS must stay at this position in the structure to maintain
+       binary compatibility.  */
+    unsigned char __flags;
+    unsigned char __shared;
+    unsigned char __pad1;
+    unsigned char __pad2;
+#endif
     int __writer;
   } __data;
 # endif
index c4440f9..af43a60 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -29,9 +29,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
index 7edb287..01bcf41 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008,
+   2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
 #include <sysdep.h>
-
+#include <bits/kernel-features.h>
 
 #define FUTEX_WAIT             0
 #define FUTEX_WAKE             1
 #define FUTEX_REQUEUE          3
 #define FUTEX_CMP_REQUEUE      4
-
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
-#define lll_futex_wait(futexp, val) \
+#define FUTEX_WAKE_OP          5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
+             & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif              
+#endif
+
+
+#define lll_futex_wait(futexp, val, private) \
+  lll_futex_timed_wait(futexp, val, NULL, private)
+
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), 0);                \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp),                      \
+                             __lll_private_flag (FUTEX_WAIT, private),       \
+                             (val), (timespec));                             \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
   })
 
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), (timespec));       \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp),                      \
+                             __lll_private_flag (FUTEX_WAKE, private),       \
+                             (nr), 0);       \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
   })
 
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+  do                                                                         \
+    {                                                                        \
+      int *__futexp = &(futexv);                                             \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
+      lll_futex_wake (__futexp, 1, private);                                 \
+    }                                                                        \
+  while (0)
+
+/* Returns non-zero if error happened, zero if success.  */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAKE, (nr), 0);                 \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp),                      \
+                             __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+                             (nr_wake), (nr_move), (mutex), (val));          \
+    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,                               \
-                             (futexp), FUTEX_CMP_REQUEUE, (nr_wake),         \
-                             (nr_move), (mutex), (val));                     \
+                                                                             \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE_OP, private),    \
+                             (nr_wake), (nr_wake2), (futexp2),               \
+                             FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);                 \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
-
 static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
 }
-#define lll_mutex_trylock(lock)        __lll_mutex_trylock (&(lock))
+#define lll_trylock(lock)      __lll_trylock (&(lock))
 
 
 static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
 }
-#define lll_mutex_cond_trylock(lock)   __lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-
-static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+static inline int __attribute__((always_inline))
+__lll_robust_trylock(int *futex, int id)
 {
-  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    __lll_lock_wait (futex);
+  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
 }
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_robust_trylock(lock, id) \
+  __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define __lll_lock(futex, private)                                           \
+  ((void) ({                                                                 \
+    int *__futex = (futex);                                                  \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex,      \
+                                                               1, 0), 0))    \
+      {                                                                              \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+         __lll_lock_wait_private (__futex);                                  \
+       else                                                                  \
+         __lll_lock_wait (__futex, private);                                 \
+      }                                                                              \
+  }))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+#define __lll_robust_lock(futex, id, private)                                \
+  ({                                                                         \
+    int *__futex = (futex);                                                  \
+    int __val = 0;                                                           \
+                                                                             \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_lock_wait (__futex, private);                     \
+    __val;                                                                   \
+  })
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 
 static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
   if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
-    __lll_lock_wait (futex);
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-       attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+                                int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+                                       int private) attribute_hidden;
 
 static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    result = __lll_timedlock_wait (futex, abstime);
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
-{
-  int val = atomic_exchange_rel (futex, 0);
-  if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
+static inline int __attribute__ ((always_inline))
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+                       int id, int private)
 {
-  (void) atomic_exchange_rel (futex, 0);
-  lll_futex_wake (futex, 1);
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
+  return result;
 }
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
-
-
-#define lll_mutex_islocked(futex) \
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
+
+
+#define __lll_unlock(futex, private)                                         \
+  ((void) ({                                                                 \
+    int *__futex = (futex);                                                  \
+    int __val = atomic_exchange_rel (__futex, 0);                            \
+                                                                             \
+    if (__builtin_expect (__val > 1, 0))                                     \
+      lll_futex_wake (__futex, 1, private);                                  \
+  }))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
+
+
+#define __lll_robust_unlock(futex, private)                                  \
+  ((void) ({                                                                 \
+    int *__futex = (futex);                                                  \
+    int __val = atomic_exchange_rel (__futex, 0);                            \
+                                                                             \
+    if (__builtin_expect (__val & FUTEX_WAITERS, 0))                         \
+      lll_futex_wake (__futex, 1, private);                                  \
+  }))
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
+
+
+#define lll_islocked(futex) \
   (futex != 0)
 
 
 /* Our internal lock implementation is identical to the binary-compatible
    mutex implementation. */
 
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER           (0)
 #define LLL_LOCK_INITIALIZER_LOCKED    (1)
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
 /* The states of a lock are:
     0  -  untaken
     1  -  taken by one user
    >1  -  taken by more users */
 
-#define lll_trylock(lock)      lll_mutex_trylock (lock)
-#define lll_lock(lock)         lll_mutex_lock (lock)
-#define lll_unlock(lock)       lll_mutex_unlock (lock)
-#define lll_islocked(lock)     lll_mutex_islocked (lock)
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
    afterwards. */
 #define lll_wait_tid(tid) \
-  do {                                 \
-    __typeof (tid) __tid;              \
-    while ((__tid = (tid)) != 0)       \
-      lll_futex_wait (&(tid), __tid);  \
+  do {                                                 \
+    __typeof (tid) __tid;                              \
+    while ((__tid = (tid)) != 0)                       \
+      lll_futex_wait (&(tid), __tid, LLL_SHARED);      \
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -191,26 +292,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;                                             \
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif /* lowlevellock.h */
index 649b752..ddfd32b 100644 (file)
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
   pthread_once_t *once_control = (pthread_once_t *) arg;
 
   *once_control = 0;
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 }
 
 
@@ -65,7 +65,7 @@ __pthread_once (once_control, init_routine)
          if (((oldval ^ newval) & -4) == 0)
            {
              /* Same generation, some other thread was faster. Wait.  */
-             lll_futex_wait (once_control, newval);
+             lll_futex_wait (once_control, newval, LLL_PRIVATE);
              continue;
            }
        }
@@ -84,7 +84,7 @@ __pthread_once (once_control, init_routine)
       atomic_increment (once_control);
 
       /* Wake up all other threads.  */
-      lll_futex_wake (once_control, INT_MAX);
+      lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
       break;
     }
 
index 5fee892..1cf625f 100644 (file)
 
 #if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
 
-#ifdef __PIC__
+# ifdef __PIC__
+#  define PSEUDO_CPLOAD .cpload t9;
+#  define PSEUDO_ERRJMP la t9, __syscall_error; jr t9;
+#  define PSEUDO_SAVEGP sw gp, 32(sp); cfi_rel_offset (gp, 32);
+#  define PSEUDO_LOADGP lw gp, 32(sp);
+# else
+#  define PSEUDO_CPLOAD
+#  define PSEUDO_ERRJMP j __syscall_error;
+#  define PSEUDO_SAVEGP
+#  define PSEUDO_LOADGP
+# endif
+
 # undef PSEUDO
 # define PSEUDO(name, syscall_name, args)                                    \
       .align 2;                                                                      \
   L(pseudo_start):                                                           \
       cfi_startproc;                                                         \
-  99: la t9,__syscall_error;                                                 \
-      jr t9;                                                                 \
+  99: PSEUDO_ERRJMP                                                          \
   .type __##syscall_name##_nocancel, @function;                                      \
   .globl __##syscall_name##_nocancel;                                        \
   __##syscall_name##_nocancel:                                               \
     .set noreorder;                                                          \
-    .cpload t9;                                                                      \
+    PSEUDO_CPLOAD                                                            \
     li v0, SYS_ify(syscall_name);                                            \
     syscall;                                                                 \
     .set reorder;                                                            \
-    bne a3, zero, SYSCALL_ERROR_LABEL;                                       \
+    bne a3, zero, 99b;                                                       \
     ret;                                                                     \
   .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;           \
   ENTRY (name)                                                               \
     .set noreorder;                                                          \
-    .cpload t9;                                                                      \
+    PSEUDO_CPLOAD                                                            \
     .set reorder;                                                            \
     SINGLE_THREAD_P(v1);                                                     \
     bne zero, v1, L(pseudo_cancel);                                          \
     li v0, SYS_ify(syscall_name);                                            \
     syscall;                                                                 \
     .set reorder;                                                            \
-    bne a3, zero, SYSCALL_ERROR_LABEL;                                       \
+    bne a3, zero, 99b;                                                       \
     ret;                                                                     \
   L(pseudo_cancel):                                                          \
     SAVESTK_##args;                                                          \
     sw ra, 28(sp);                                                           \
     cfi_rel_offset (ra, 28);                                                 \
-    sw gp, 32(sp);                                                           \
-    cfi_rel_offset (gp, 32);                                                 \
+    PSEUDO_SAVEGP                                                            \
     PUSHARGS_##args;                   /* save syscall args */               \
     CENABLE;                                                                 \
-    lw gp, 32(sp);                                                           \
+    PSEUDO_LOADGP                                                            \
     sw v0, 44(sp);                     /* save mask */                       \
     POPARGS_##args;                    /* restore syscall args */            \
     .set noreorder;                                                          \
     sw a3, 40(sp);                     /* save syscall error flag */         \
     lw a0, 44(sp);                     /* pass mask as arg1 */               \
     CDISABLE;                                                                \
-    lw gp, 32(sp);                                                           \
+    PSEUDO_LOADGP                                                            \
     lw v0, 36(sp);                     /* restore syscall result */          \
     lw a3, 40(sp);                     /* restore syscall error flag */      \
     lw ra, 28(sp);                     /* restore return address */          \
     .set noreorder;                                                          \
-    bne a3, zero, SYSCALL_ERROR_LABEL;                                       \
+    bne a3, zero, 99b;                                                       \
      RESTORESTK;                                                             \
   L(pseudo_end):                                                             \
     .set reorder;
@@ -87,8 +96,6 @@
 # undef PSEUDO_END
 # define PSEUDO_END(sym) cfi_endproc; .end sym; .size sym,.-sym
 
-#endif
-
 # define PUSHARGS_0    /* nothing to do */
 # define PUSHARGS_1    PUSHARGS_0 sw a0, 0(sp); cfi_rel_offset (a0, 0);
 # define PUSHARGS_2    PUSHARGS_1 sw a1, 4(sp); cfi_rel_offset (a1, 4);
 # define RESTORESTK    addu sp, STKSPACE; cfi_adjust_cfa_offset(-STKSPACE)
 
 
+# ifdef __PIC__
 /* We use jalr rather than jal.  This means that the assembler will not
    automatically restore $gp (in case libc has multiple GOTs) so we must
    do it manually - which we have to do anyway since we don't use .cprestore.
    It also shuts up the assembler warning about not using .cprestore.  */
+#  define PSEUDO_JMP(sym) la t9, sym; jalr t9;
+# else
+#  define PSEUDO_JMP(sym) jal sym;
+# endif
+
 # ifdef IS_IN_libpthread
-#  define CENABLE      la t9, __pthread_enable_asynccancel; jalr t9;
-#  define CDISABLE     la t9, __pthread_disable_asynccancel; jalr t9;
+#  define CENABLE      PSEUDO_JMP (__pthread_enable_asynccancel)
+#  define CDISABLE     PSEUDO_JMP (__pthread_disable_asynccancel)
 # elif defined IS_IN_librt
-#  define CENABLE      la t9, __librt_enable_asynccancel; jalr t9;
-#  define CDISABLE     la t9, __librt_disable_asynccancel; jalr t9;
+#  define CENABLE      PSEUDO_JMP (__librt_enable_asynccancel)
+#  define CDISABLE     PSEUDO_JMP (__librt_disable_asynccancel)
 # else
-#  define CENABLE      la t9, __libc_enable_asynccancel; jalr t9;
-#  define CDISABLE     la t9, __libc_disable_asynccancel; jalr t9;
+#  define CENABLE      PSEUDO_JMP (__libc_enable_asynccancel)
+#  define CDISABLE     PSEUDO_JMP (__libc_disable_asynccancel)
 # endif
 
 # ifndef __ASSEMBLER__
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index 9d16fee..188040e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2004, 2005, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contribute by Ulrich Drepper <drepper@redhat.com>, 2004.
 
@@ -29,6 +29,7 @@
 #include <unistd.h>
 #include <sys/socket.h>
 #include <not-cancel.h>
+#include <bits/kernel-features.h>
 
 
 #ifdef __NR_mq_notify
@@ -157,7 +158,7 @@ init_mq_netlink (void)
   if (netlink_socket == -1)
     {
       /* Just a normal netlink socket, not bound.  */
-      netlink_socket = socket (AF_NETLINK, SOCK_RAW, 0);
+         netlink_socket = socket (AF_NETLINK, SOCK_RAW, 0);
       /* No need to do more if we have no socket.  */
       if (netlink_socket == -1)
        return;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions
deleted file mode 100644 (file)
index 9977847..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-libpthread {
-  GLIBC_2.3.4 {
-    longjmp; siglongjmp;
-  }
-}
index c94ed0c..c0b59c3 100644 (file)
@@ -1,5 +1,5 @@
 /* Machine-specific pthread type layouts.  PowerPC version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -57,6 +57,7 @@ typedef union
   long int __align;
 } pthread_attr_t;
 
+
 #if __WORDSIZE == 64
 typedef struct __pthread_internal_list
 {
@@ -70,6 +71,7 @@ typedef struct __pthread_internal_slist
 } __pthread_slist_t;
 #endif
 
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is deliberately not exposed.  */
 typedef union
@@ -88,7 +90,7 @@ typedef union
 #if __WORDSIZE == 64
     int __spins;
     __pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV      1
+# define __PTHREAD_MUTEX_HAVE_PREV     1
 #else
     unsigned int __nusers;
     __extension__ union
@@ -158,9 +160,9 @@ typedef union
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
     int __writer;
-    int __pad1;
+    int __shared;
+    unsigned long int __pad1;
     unsigned long int __pad2;
-    unsigned long int __pad3;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
     unsigned int __flags;
@@ -174,9 +176,12 @@ typedef union
     unsigned int __writer_wakeup;
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
+    unsigned char __pad1;
+    unsigned char __pad2;
+    unsigned char __shared;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
     int __writer;
   } __data;
 # endif
index 8123b41..c7f121b 100644 (file)
@@ -33,9 +33,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
index 1f2f481..66c02cb 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006-2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -24,7 +24,7 @@
 #include <sys/param.h>
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
-
+#include <kernel-features.h>
 
 #ifndef __NR_futex
 # define __NR_futex            221
 #define FUTEX_WAKE             1
 #define FUTEX_REQUEUE          3
 #define FUTEX_CMP_REQUEUE      4
+#define FUTEX_WAKE_OP          5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
+             & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
 
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define lll_futex_wait(futexp, val, private) \
+  lll_futex_timed_wait (futexp, val, NULL, private)
 
-#define lll_futex_wait(futexp, val) \
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
                                                                              \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), 0);                \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAIT, private),       \
+                             (val), (timespec));                             \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
   })
 
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
                                                                              \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAIT, (val), (timespec));       \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE, private),       \
+                             (nr), 0);                                       \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
   })
 
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+  do                                                                         \
+    {                                                                        \
+      INTERNAL_SYSCALL_DECL (__err);                                         \
+      int *__futexp = &(futexv);                                             \
+                                                                             \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
+      INTERNAL_SYSCALL (futex, __err, 4, __futexp,                           \
+                       __lll_private_flag (FUTEX_WAKE, private), 1, 0);      \
+    }                                                                        \
+  while (0)
+
+/* Returns non-zero if error happened, zero if success.  */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
                                                                              \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,                               \
-                             (futexp), FUTEX_WAKE, (nr), 0);                 \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;                \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+                             (nr_wake), (nr_move), (mutex), (val));          \
+    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
                                                                              \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,                               \
-                             (futexp), FUTEX_CMP_REQUEUE, (nr_wake),         \
-                             (nr_move), (mutex), (val));                     \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAKE_OP, private),    \
+                             (nr_wake), (nr_wake2), (futexp2),               \
+                             FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);                 \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
+
 #ifdef UP
 # define __lll_acq_instr       ""
 # define __lll_rel_instr       ""
 #else
 # define __lll_acq_instr       "isync"
-# define __lll_rel_instr       "sync"
+# ifdef _ARCH_PWR4
+/*
+ * Newer powerpc64 processors support the new "light weight" sync (lwsync)
+ * So if the build is using -mcpu=[power4,power5,power5+,970] we can
+ * safely use lwsync.
+ */
+#  define __lll_rel_instr      "lwsync"
+# else
+/*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync).  So the only safe option is to use normal sync
+ * for all powerpc32 applications.
+ */
+#  define __lll_rel_instr      "sync"
+# endif
 #endif
 
-/* Set *futex to 1 if it is 0, atomically.  Returns the old value */
-#define __lll_trylock(futex) \
+/* Set *futex to ID if it is 0, atomically.  Returns the old value */
+#define __lll_robust_trylock(futex, id) \
   ({ int __val;                                                                      \
-     __asm __volatile ("1:     lwarx   %0,0,%2\n"                            \
+     __asm __volatile ("1:     lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"          \
                       "        cmpwi   0,%0,0\n"                             \
                       "        bne     2f\n"                                 \
                       "        stwcx.  %3,0,%2\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __lll_acq_instr                             \
                       : "=&r" (__val), "=m" (*futex)                         \
-                      : "r" (futex), "r" (1), "m" (*futex)                   \
+                      : "r" (futex), "r" (id), "m" (*futex)                  \
                       : "cr0", "memory");                                    \
      __val;                                                                  \
   })
 
-#define lll_mutex_trylock(lock)        __lll_trylock (&(lock))
+#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+
+/* Set *futex to 1 if it is 0, atomically.  Returns the old value */
+#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
+
+#define lll_trylock(lock)      __lll_trylock (&(lock))
 
 /* Set *futex to 2 if it is 0, atomically.  Returns the old value */
-#define __lll_cond_trylock(futex) \
-  ({ int __val;                                                                      \
-     __asm __volatile ("1:     lwarx   %0,0,%2\n"                            \
-                      "        cmpwi   0,%0,0\n"                             \
-                      "        bne     2f\n"                                 \
-                      "        stwcx.  %3,0,%2\n"                            \
-                      "        bne-    1b\n"                                 \
-                      "2:      " __lll_acq_instr                             \
-                      : "=&r" (__val), "=m" (*futex)                         \
-                      : "r" (futex), "r" (2), "m" (*futex)                   \
-                      : "cr0", "memory");                                    \
-     __val;                                                                  \
-  })
-#define lll_mutex_cond_trylock(lock)   __lll_cond_trylock (&(lock))
+#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
 
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
 
-#define lll_mutex_lock(lock) \
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define lll_lock(lock, private) \
   (void) ({                                                                  \
     int *__futex = &(lock);                                                  \
     if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
                          0) != 0)                                            \
-      __lll_lock_wait (__futex);                                             \
+      {                                                                              \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+         __lll_lock_wait_private (__futex);                                  \
+       else                                                                  \
+         __lll_lock_wait (__futex, private);                                 \
+      }                                                                              \
+  })
+
+#define lll_robust_lock(lock, id, private) \
+  ({                                                                         \
+    int *__futex = &(lock);                                                  \
+    int __val = 0;                                                           \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_lock_wait (__futex, private);                     \
+    __val;                                                                   \
   })
 
-#define lll_mutex_cond_lock(lock) \
+#define lll_cond_lock(lock, private) \
   (void) ({                                                                  \
     int *__futex = &(lock);                                                  \
     if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
                          0) != 0)                                            \
-      __lll_lock_wait (__futex);                                             \
+      __lll_lock_wait (__futex, private);                                    \
+  })
+
+#define lll_robust_cond_lock(lock, id, private) \
+  ({                                                                         \
+    int *__futex = &(lock);                                                  \
+    int __val = 0;                                                           \
+    int __id = id | FUTEX_WAITERS;                                           \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
+                                                               0), 0))       \
+      __val = __lll_robust_lock_wait (__futex, private);                     \
+    __val;                                                                   \
   })
 
+
 extern int __lll_timedlock_wait
-  (int *futex, const struct timespec *) attribute_hidden;
+  (int *futex, const struct timespec *, int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+  (int *futex, const struct timespec *, int private) attribute_hidden;
 
-#define lll_mutex_timedlock(lock, abstime) \
+#define lll_timedlock(lock, abstime, private) \
   ({                                                                         \
     int *__futex = &(lock);                                                  \
     int __val = 0;                                                           \
     if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
                          0) != 0)                                            \
-      __val = __lll_timedlock_wait (__futex, abstime);                       \
+      __val = __lll_timedlock_wait (__futex, abstime, private);                      \
+    __val;                                                                   \
+  })
+
+#define lll_robust_timedlock(lock, abstime, id, private) \
+  ({                                                                         \
+    int *__futex = &(lock);                                                  \
+    int __val = 0;                                                           \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+                                                               0), 0))       \
+      __val = __lll_robust_timedlock_wait (__futex, abstime, private);       \
     __val;                                                                   \
   })
 
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
   ((void) ({                                                                 \
     int *__futex = &(lock);                                                  \
     int __val = atomic_exchange_rel (__futex, 0);                            \
     if (__builtin_expect (__val > 1, 0))                                     \
-      lll_futex_wake (__futex, 1);                                           \
+      lll_futex_wake (__futex, 1, private);                                  \
   }))
 
-#define lll_mutex_unlock_force(lock) \
+#define lll_robust_unlock(lock, private) \
   ((void) ({                                                                 \
     int *__futex = &(lock);                                                  \
-    *__futex = 0;                                                            \
-    __asm __volatile (__lll_rel_instr ::: "memory");                         \
-    lll_futex_wake (__futex, 1);                                             \
+    int __val = atomic_exchange_rel (__futex, 0);                            \
+    if (__builtin_expect (__val & FUTEX_WAITERS, 0))                         \
+      lll_futex_wake (__futex, 1, private);                                  \
   }))
 
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
 
-/* Our internal lock implementation is identical to the binary-compatible
-   mutex implementation. */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER           (0)
 #define LLL_LOCK_INITIALIZER_LOCKED    (1)
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
 /* The states of a lock are:
     0  -  untaken
     1  -  taken by one user
    >1  -  taken by more users */
 
-#define lll_trylock(lock)      lll_mutex_trylock (lock)
-#define lll_lock(lock)         lll_mutex_lock (lock)
-#define lll_unlock(lock)       lll_mutex_unlock (lock)
-#define lll_islocked(lock)     lll_mutex_islocked (lock)
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -202,7 +297,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
   do {                                                                       \
     __typeof (tid) __tid;                                                    \
     while ((__tid = (tid)) != 0)                                             \
-      lll_futex_wait (&(tid), __tid);                                        \
+      lll_futex_wait (&(tid), __tid, LLL_SHARED);                            \
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -216,26 +311,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;                                                                   \
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif /* lowlevellock.h */
index e19579e..675a997 100644 (file)
@@ -1,3 +1,9 @@
-#define RESET_PID
-#include <tcb-offsets.h>
-#include <sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S>
+/* We want an #include_next, but we are the main source file.
+   So, #include ourselves and in that incarnation we can use #include_next.  */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <clone.S>
+#else
+# define RESET_PID
+# include_next <clone.S>
+#endif
index 0e62256..88b24e7 100644 (file)
@@ -1,5 +1,5 @@
 /* Cancellable system call stubs.  Linux/PowerPC version.
-   Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>, 2003.
 
@@ -15,8 +15,8 @@
 
    You should have received a copy of the GNU Lesser General Public
    License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
 
 #include <sysdep.h>
 #include <tls.h>
@@ -30,7 +30,6 @@
 # define PSEUDO(name, syscall_name, args)                              \
   .section ".text";                                                    \
   ENTRY (name)                                                         \
-    cfi_startproc;                                                     \
     SINGLE_THREAD_P;                                                   \
     bne- .Lpseudo_cancel;                                              \
   .type __##syscall_name##_nocancel,@function;                         \
@@ -45,7 +44,6 @@
     mflr 9;                                                            \
     stw 9,52(1);                                                       \
     cfi_offset (lr, 4);                                                        \
-    CGOTSETUP;                                                         \
     DOCARGS_##args;    /* save syscall args around CENABLE.  */        \
     CENABLE;                                                           \
     stw 3,16(1);       /* store CENABLE return value (MASK).  */       \
     lwz 4,52(1);                                                       \
     lwz 0,12(1);       /* restore CR/R3. */                            \
     lwz 3,8(1);                                                                \
-    CGOTRESTORE;                                                       \
     mtlr 4;                                                            \
     mtcr 0;                                                            \
-    addi 1,1,48;                                                       \
-    cfi_endproc;
+    addi 1,1,48;
 
 # define DOCARGS_0
 # define UNDOCARGS_0
@@ -86,9 +82,6 @@
 # define DOCARGS_6     stw 8,40(1); DOCARGS_5
 # define UNDOCARGS_6   lwz 8,40(1); UNDOCARGS_5
 
-# define CGOTSETUP
-# define CGOTRESTORE
-
 # ifdef IS_IN_libpthread
 #  define CENABLE      bl __pthread_enable_asynccancel@local
 #  define CDISABLE     bl __pthread_disable_asynccancel@local
 #  define CENABLE      bl __libc_enable_asynccancel@local
 #  define CDISABLE     bl __libc_disable_asynccancel@local
 # elif defined IS_IN_librt
-#  define CENABLE      bl JUMPTARGET(__librt_enable_asynccancel)
-#  define CDISABLE     bl JUMPTARGET(__librt_disable_asynccancel)
-#  if defined HAVE_AS_REL16 && defined __PIC__
-#   undef CGOTSETUP
-#   define CGOTSETUP                                                   \
-    bcl 20,31,1f;                                                      \
- 1: stw 30,44(1);                                                      \
-    mflr 30;                                                           \
-    addis 30,30,_GLOBAL_OFFSET_TABLE-1b@ha;                            \
-    addi 30,30,_GLOBAL_OFFSET_TABLE-1b@l
-#   undef CGOTRESTORE
-#   define CGOTRESTORE                                                 \
-    lwz 30,44(1)
-#  endif
+#  define CENABLE      bl __librt_enable_asynccancel@local
+#  define CDISABLE     bl __librt_disable_asynccancel@local
 # else
 #  error Unsupported library
 # endif
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index b7e2cf6..eed2a8f 100644 (file)
@@ -53,5 +53,5 @@ ENTRY (__vfork)
        PSEUDO_RET
 
 PSEUDO_END (__vfork)
-hidden_def (__vfork)
+libc_hidden_def (__vfork)
 weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions
deleted file mode 100644 (file)
index 3b111dd..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
-  GLIBC_2.3.3 {
-    # Changed timer_t.
-    timer_create; timer_delete; timer_getoverrun; timer_gettime;
-    timer_settime;
-  }
-}
index f87adf4..675a997 100644 (file)
@@ -1,3 +1,9 @@
-#define RESET_PID
-#include <tcb-offsets.h>
-#include <sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S>
+/* We want an #include_next, but we are the main source file.
+   So, #include ourselves and in that incarnation we can use #include_next.  */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <clone.S>
+#else
+# define RESET_PID
+# include_next <clone.S>
+#endif
index 226aaaf..707765a 100644 (file)
@@ -1,5 +1,5 @@
 /* Cancellable system call stubs.  Linux/PowerPC64 version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>, 2003.
 
@@ -15,8 +15,8 @@
 
    You should have received a copy of the GNU Lesser General Public
    License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
 
 #include <sysdep.h>
 #include <tls.h>
@@ -36,7 +36,6 @@
 # define PSEUDO(name, syscall_name, args)                              \
   .section ".text";                                                    \
   ENTRY (name)                                                         \
-    cfi_startproc;                                                     \
     SINGLE_THREAD_P;                                                   \
     bne- .Lpseudo_cancel;                                              \
   .type DASHDASHPFX(syscall_name##_nocancel),@function;                        \
@@ -66,8 +65,7 @@
     ld   3,64(1);                                                      \
     mtlr 9;                                                            \
     mtcr 0;                                                            \
-    addi 1,1,128;                                                      \
-    cfi_endproc;
+    addi 1,1,128;
 
 # define DOCARGS_0
 # define UNDOCARGS_0
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index 0181321..26885bb 100644 (file)
@@ -51,5 +51,5 @@ ENTRY (__vfork)
        PSEUDO_RET
 
 PSEUDO_END (__vfork)
-hidden_def (__vfork)
+libc_hidden_def (__vfork)
 weak_alias (__vfork, vfork)
index e1afff8..9690780 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
   pthread_once_t *once_control = (pthread_once_t *) arg;
 
   *once_control = 0;
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 }
 
 
@@ -74,7 +74,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
        break;
 
       /* Same generation, some other thread was faster. Wait.  */
-      lll_futex_wait (once_control, oldval);
+      lll_futex_wait (once_control, oldval, LLL_PRIVATE);
     }
 
 
@@ -92,7 +92,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
   atomic_increment (once_control);
 
   /* Wake up all other threads.  */
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 
   return 0;
 }
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
new file mode 100644 (file)
index 0000000..90f2dc6
--- /dev/null
@@ -0,0 +1,29 @@
+/* pthread_spin_unlock -- unlock a spin lock.  PowerPC version.
+   Copyright (C) 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+  __asm __volatile (__lll_rel_instr ::: "memory");
+  *lock = 0;
+  return 0;
+}
index 06b3bd0..0082c57 100644 (file)
@@ -1,5 +1,5 @@
 /* sem_post -- post to a POSIX semaphore.  Powerpc version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
 int
 __new_sem_post (sem_t *sem)
 {
-  int *futex = (int *) sem;
+  struct new_sem *isem = (struct new_sem *) sem;
 
   __asm __volatile (__lll_rel_instr ::: "memory");
-  int nr = atomic_increment_val (futex);
-  int err = lll_futex_wake (futex, nr);
-  if (__builtin_expect (err, 0) < 0)
+  atomic_increment (&isem->value);
+  __asm __volatile (__lll_acq_instr ::: "memory");
+  if (isem->nwaiters > 0)
     {
-      __set_errno (-err);
-      return -1;
+      int err = lll_futex_wake (&isem->value, 1,
+                               isem->private ^ FUTEX_PRIVATE_FLAG);
+      if (__builtin_expect (err, 0) < 0)
+       {
+         __set_errno (-err);
+         return -1;
+       }
     }
   return 0;
 }
index 3e1b70f..a1e228e 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <unistd.h>
 
-extern int __libc_fork (void);
 
 pid_t
 __fork (void)
index 7bee297..d256ebc 100644 (file)
 #include <tls.h>
 #include <bits/kernel-features.h>
 
-extern __typeof(raise) __raise;
-int __raise (int sig)
+
+int
+raise (
+     int sig)
 {
 #if __ASSUME_TGKILL || defined __NR_tgkill
   /* raise is an async-safe function.  It could be called while the
@@ -48,6 +50,3 @@ int __raise (int sig)
   return INLINE_SYSCALL (tkill, 2, THREAD_GETMEM (THREAD_SELF, tid), sig);
 #endif
 }
-libc_hidden_proto(raise)
-weak_alias(__raise, raise)
-libc_hidden_weak(raise)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym
new file mode 100644 (file)
index 0000000..46fbd0d
--- /dev/null
@@ -0,0 +1,8 @@
+#include <pthreadP.h>
+
+-- These PI macros are used by assembly code.
+
+MUTEX_KIND     offsetof (pthread_mutex_t, __data.__kind)
+ROBUST_BIT     PTHREAD_MUTEX_ROBUST_NORMAL_NP
+PI_BIT         PTHREAD_MUTEX_PRIO_INHERIT_NP
+PS_BIT         PTHREAD_MUTEX_PSHARED_BIT
index aec1bc3..b4fb1a2 100644 (file)
@@ -38,8 +38,7 @@ __pthread_attr_getaffinity_np(const pthread_attr_t *attr, size_t cpusetsize,
     {
       /* Check whether there are any bits set beyond the limits
         the user requested.  */
-      size_t cnt;
-      for (cnt = cpusetsize; cnt < iattr->cpusetsize; ++cnt)
+      for (size_t cnt = cpusetsize; cnt < iattr->cpusetsize; ++cnt)
        if (((char *) iattr->cpuset)[cnt] != 0)
          return EINVAL;
 
index 580cf2c..609ee2a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -26,7 +26,7 @@
 
 
 /* Defined in pthread_setaffinity.c.  */
-extern size_t __kernel_cpumask_size;
+extern size_t __kernel_cpumask_size attribute_hidden;
 extern int __determine_cpumask_size (pid_t tid);
 libpthread_hidden_proto(__determine_cpumask_size)
 
@@ -57,8 +57,7 @@ pthread_attr_setaffinity_np (pthread_attr_t *attr, size_t cpusetsize,
 
       /* Check whether the new bitmask has any bit set beyond the
         last one the kernel accepts.  */
-      size_t cnt;
-      for (cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
+      for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
        if (((char *) cpuset)[cnt] != '\0')
          /* Found a nonzero byte.  This means the user request cannot be
             fulfilled.  */
index 189af77..affcc6a 100644 (file)
@@ -43,3 +43,4 @@ __pthread_getaffinity_np (pthread_t th, size_t cpusetsize, cpu_set_t *cpuset)
   return 0;
 }
 strong_alias(__pthread_getaffinity_np, pthread_getaffinity_np)
+
index 155d364..9e28f69 100644 (file)
@@ -1,5 +1,4 @@
-/* pthread_getcpuclockid -- Get POSIX clockid_t for a pthread_t.  Linux version
-   Copyright (C) 2000,2001,2002,2003,2004,2005 Free Software Foundation, Inc.
+/* Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #include <pthreadP.h>
 #include <sys/time.h>
 #include <tls.h>
-#include <bits/kernel-features.h>
-#include <kernel-posix-cpu-timers.h>
 
 
-#if !(__ASSUME_POSIX_CPU_TIMERS > 0)
-int __libc_missing_posix_cpu_timers attribute_hidden;
-#endif
-#if !(__ASSUME_POSIX_TIMERS > 0)
-int __libc_missing_posix_timers attribute_hidden;
-#endif
-
 int
 pthread_getcpuclockid (
      pthread_t threadid,
@@ -44,50 +34,6 @@ pthread_getcpuclockid (
     /* Not a valid thread handle.  */
     return ESRCH;
 
-#ifdef __NR_clock_getres
-  /* The clockid_t value is a simple computation from the TID.
-     But we do a clock_getres call to validate it if we aren't
-     yet sure we have the kernel support.  */
-
-  const clockid_t tidclock = MAKE_THREAD_CPUCLOCK (pd->tid, CPUCLOCK_SCHED);
-
-# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
-#  if !(__ASSUME_POSIX_TIMERS > 0)
-  if (__libc_missing_posix_timers && !__libc_missing_posix_cpu_timers)
-    __libc_missing_posix_cpu_timers = 1;
-#  endif
-  if (!__libc_missing_posix_cpu_timers)
-    {
-      INTERNAL_SYSCALL_DECL (err);
-      int r = INTERNAL_SYSCALL (clock_getres, err, 2, tidclock, NULL);
-      if (!INTERNAL_SYSCALL_ERROR_P (r, err))
-# endif
-       {
-         *clockid = tidclock;
-         return 0;
-       }
-
-# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
-#  if !(__ASSUME_POSIX_TIMERS > 0)
-      if (INTERNAL_SYSCALL_ERRNO (r, err) == ENOSYS)
-       {
-         /* The kernel doesn't support these calls at all.  */
-         __libc_missing_posix_timers = 1;
-         __libc_missing_posix_cpu_timers = 1;
-       }
-      else
-#  endif
-       if (INTERNAL_SYSCALL_ERRNO (r, err) == EINVAL)
-         {
-           /* The kernel doesn't support these clocks at all.  */
-           __libc_missing_posix_cpu_timers = 1;
-         }
-      else
-       return INTERNAL_SYSCALL_ERRNO (r, err);
-    }
-# endif
-#endif
-
 #ifdef CLOCK_THREAD_CPUTIME_ID
   /* We need to store the thread ID in the CLOCKID variable together
      with a number identifying the clock.  We reserve the low 3 bits
index 8d887e0..3a70c37 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -33,7 +33,15 @@ __pthread_kill (
   struct pthread *pd = (struct pthread *) threadid;
 
   /* Make sure the descriptor is valid.  */
-  if (INVALID_TD_P (pd))
+  if (DEBUGGING_P && INVALID_TD_P (pd))
+    /* Not a valid thread handle.  */
+    return ESRCH;
+
+  /* Force load of pd->tid into local variable or register.  Otherwise
+     if a thread exits between ESRCH test and tgkill, we might return
+     EINVAL, because pd->tid would be cleared by the kernel.  */
+  pid_t tid = atomic_forced_read (pd->tid);
+  if (__builtin_expect (tid <= 0, 0))
     /* Not a valid thread handle.  */
     return ESRCH;
 
@@ -53,15 +61,15 @@ __pthread_kill (
   int val;
 #if __ASSUME_TGKILL
   val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
-                         pd->tid, signo);
+                         tid, signo);
 #else
 # ifdef __NR_tgkill
   val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
-                         pd->tid, signo);
+                         tid, signo);
   if (INTERNAL_SYSCALL_ERROR_P (val, err)
       && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
 # endif
-    val = INTERNAL_SYSCALL (tkill, err, 2, pd->tid, signo);
+    val = INTERNAL_SYSCALL (tkill, err, 2, tid, signo);
 #endif
 
   return (INTERNAL_SYSCALL_ERROR_P (val, err)
index 047c643..804bfab 100644 (file)
@@ -1,8 +1,14 @@
 #include <pthreadP.h>
 
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock(mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock(mutex)
+#define LLL_MUTEX_LOCK(mutex) \
+  lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+#define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_cond_trylock ((mutex)->__data.__lock)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+  lll_robust_cond_lock ((mutex)->__data.__lock, id, \
+                       PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
 #define __pthread_mutex_lock __pthread_mutex_cond_lock
+#define __pthread_mutex_lock_full __pthread_mutex_cond_lock_full
 #define NO_INCR
 
 #include <pthread_mutex_lock.c>
index d28b6f2..467e8ec 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -24,7 +24,7 @@
 #include <sys/types.h>
 
 
-size_t __kernel_cpumask_size;
+size_t __kernel_cpumask_size attribute_hidden;
 
 
 /* Determine the current affinity.  As a side effect we learn
@@ -71,8 +71,7 @@ pthread_setaffinity_np (pthread_t th, size_t cpusetsize,
 
   /* We now know the size of the kernel cpumask_t.  Make sure the user
      does not request to set a bit beyond that.  */
-  size_t cnt;
-  for (cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
+  for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
     if (((char *) cpuset)[cnt] != '\0')
       /* Found a nonzero byte.  This means the user request cannot be
         fulfilled.  */
@@ -80,6 +79,12 @@ pthread_setaffinity_np (pthread_t th, size_t cpusetsize,
 
   res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid, cpusetsize,
                          cpuset);
+
+#ifdef RESET_VGETCPU_CACHE
+  if (!INTERNAL_SYSCALL_ERROR_P (res, err))
+    RESET_VGETCPU_CACHE ();
+#endif
+
   return (INTERNAL_SYSCALL_ERROR_P (res, err)
          ? INTERNAL_SYSCALL_ERRNO (res, err)
          : 0);
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c
new file mode 100644 (file)
index 0000000..9e49085
--- /dev/null
@@ -0,0 +1,83 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthreadP.h>
+#include <tls.h>
+#include <sysdep.h>
+#include <bits/kernel-features.h>
+
+
+int
+pthread_sigqueue (
+     pthread_t threadid,
+     int signo,
+     const union sigval value)
+{
+#ifdef __NR_rt_tgsigqueueinfo
+  struct pthread *pd = (struct pthread *) threadid;
+
+  /* Make sure the descriptor is valid.  */
+  if (DEBUGGING_P && INVALID_TD_P (pd))
+    /* Not a valid thread handle.  */
+    return ESRCH;
+
+  /* Force load of pd->tid into local variable or register.  Otherwise
+     if a thread exits between ESRCH test and tgkill, we might return
+     EINVAL, because pd->tid would be cleared by the kernel.  */
+  pid_t tid = atomic_forced_read (pd->tid);
+  if (__builtin_expect (tid <= 0, 0))
+    /* Not a valid thread handle.  */
+    return ESRCH;
+
+  /* Disallow sending the signal we use for cancellation, timers, and
+     for the setxid implementation.  */
+  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
+    return EINVAL;
+
+  /* Set up the siginfo_t structure.  */
+  siginfo_t info;
+  memset (&info, '\0', sizeof (siginfo_t));
+  info.si_signo = signo;
+  info.si_code = SI_QUEUE;
+  info.si_pid = THREAD_GETMEM (THREAD_SELF, pid);
+  info.si_uid = getuid ();
+  info.si_value = value;
+
+  /* We have a special syscall to do the work.  */
+  INTERNAL_SYSCALL_DECL (err);
+
+  /* One comment: The PID field in the TCB can temporarily be changed
+     (in fork).  But this must not affect this code here.  Since this
+     function would have to be called while the thread is executing
+     fork, it would have to happen in a signal handler.  But this is
+     not allowed, pthread_sigqueue is not guaranteed to be async-safe.  */
+  int val = INTERNAL_SYSCALL (rt_tgsigqueueinfo, err, 4,
+                             THREAD_GETMEM (THREAD_SELF, pid),
+                             tid, signo, &info);
+
+  return (INTERNAL_SYSCALL_ERROR_P (val, err)
+         ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
+#else
+  return ENOSYS;
+#endif
+}
index de794e4..da35cfe 100644 (file)
@@ -25,8 +25,9 @@
 #include <bits/kernel-features.h>
 
 
-extern __typeof(raise) __raise;
-int __raise (int sig)
+int
+raise (
+     int sig)
 {
   struct pthread *pd = THREAD_SELF;
 #if __ASSUME_TGKILL || defined __NR_tgkill
@@ -70,6 +71,4 @@ int __raise (int sig)
   return INLINE_SYSCALL (tkill, 2, selftid, sig);
 #endif
 }
-libc_hidden_proto(raise)
-weak_alias(__raise, raise)
-libc_hidden_weak(raise)
+libc_hidden_def (raise)
index f6c3de4..9e36858 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
-#include "fork.h"
+#include <fork.h>
+#include <atomic.h>
 
 
 /* Lock to protect allocation and deallocation of fork handlers.  */
-lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER;
+int __fork_lock = LLL_LOCK_INITIALIZER;
 
 
 /* Number of pre-allocated handler entries.  */
@@ -85,7 +86,7 @@ __register_atfork (
      void *dso_handle)
 {
   /* Get the lock to not conflict with other allocations.  */
-  lll_lock (__fork_lock);
+  lll_lock (__fork_lock, LLL_PRIVATE);
 
   struct fork_handler *newp = fork_handler_alloc ();
 
@@ -97,12 +98,49 @@ __register_atfork (
       newp->child_handler = child;
       newp->dso_handle = dso_handle;
 
-      newp->next = __fork_handlers;
-      __fork_handlers = newp;
+      __linkin_atfork (newp);
     }
 
   /* Release the lock.  */
-  lll_unlock (__fork_lock);
+  lll_unlock (__fork_lock, LLL_PRIVATE);
 
   return newp == NULL ? ENOMEM : 0;
 }
+libc_hidden_def (__register_atfork)
+
+
+void
+attribute_hidden
+__linkin_atfork (struct fork_handler *newp)
+{
+  do
+    newp->next = __fork_handlers;
+  while (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
+                                               newp, newp->next) != 0);
+}
+
+
+libc_freeres_fn (free_mem)
+{
+  /* Get the lock to not conflict with running forks.  */
+  lll_lock (__fork_lock, LLL_PRIVATE);
+
+  /* No more fork handlers.  */
+  __fork_handlers = NULL;
+
+  /* Free any allocated memory blocks for the object pool.  */
+  struct fork_handler_pool *runp = fork_handler_pool.next;
+
+  memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));
+
+  /* Release the lock.  */
+  lll_unlock (__fork_lock, LLL_PRIVATE);
+
+  /* We can free the memory after releasing the lock.  */
+  while (runp != NULL)
+    {
+      struct fork_handler_pool *oldp = runp;
+      runp = runp->next;
+      free (oldp);
+    }
+}
index 7f0b679..7ed0df8 100644 (file)
@@ -1,5 +1,5 @@
 /* sem_post -- post to a POSIX semaphore.  Generic futex-using version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
 int
 __new_sem_post (sem_t *sem)
 {
-  int *futex = (int *) sem;
+  struct new_sem *isem = (struct new_sem *) sem;
 
-  int nr = atomic_increment_val (futex);
-  int err = lll_futex_wake (futex, nr);
-  if (__builtin_expect (err, 0) < 0)
+  __typeof (isem->value) cur;
+  do
     {
-      __set_errno (-err);
-      return -1;
+      cur = isem->value;
+      if (isem->value == SEM_VALUE_MAX)
+       {
+         __set_errno (EOVERFLOW);
+         return -1;
+       }
+    }
+  while (atomic_compare_and_exchange_bool_acq (&isem->value, cur + 1, cur));
+
+  atomic_full_barrier ();
+  if (isem->nwaiters > 0)
+    {
+      int err = lll_futex_wake (&isem->value, 1,
+                               isem->private ^ FUTEX_PRIVATE_FLAG);
+      if (__builtin_expect (err, 0) < 0)
+       {
+         __set_errno (-err);
+         return -1;
+       }
     }
   return 0;
 }
index 79b1194..3e5e6dc 100644 (file)
@@ -1,5 +1,5 @@
 /* sem_timedwait -- wait on a semaphore.  Generic futex-using version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
 #include <lowlevellock.h>
 #include <internaltypes.h>
 #include <semaphore.h>
+
 #include <pthreadP.h>
 
 
+extern void __sem_wait_cleanup (void *arg) attribute_hidden;
+
+
 int
 sem_timedwait (sem_t *sem, const struct timespec *abstime)
 {
-  /* First check for cancellation.  */
-  CANCELLATION_P (THREAD_SELF);
-
-  int *futex = (int *) sem;
-  int val;
+  struct new_sem *isem = (struct new_sem *) sem;
   int err;
 
-  if (*futex > 0)
+  if (atomic_decrement_if_positive (&isem->value) > 0)
+    return 0;
+
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     {
-      val = atomic_decrement_if_positive (futex);
-      if (val > 0)
-       return 0;
+      __set_errno (EINVAL);
+      return -1;
     }
 
-  err = -EINVAL;
-  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
-    goto error_return;
+  atomic_increment (&isem->nwaiters);
+
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
 
-  do
+  while (1)
     {
       struct timeval tv;
       struct timespec rt;
       int sec, nsec;
 
       /* Get the current time.  */
-      gettimeofday (&tv, NULL);
+      __gettimeofday (&tv, NULL);
 
       /* Compute relative timeout.  */
       sec = abstime->tv_sec - tv.tv_sec;
@@ -68,7 +70,11 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
       /* Already timed out?  */
       err = -ETIMEDOUT;
       if (sec < 0)
-       goto error_return;
+       {
+         __set_errno (ETIMEDOUT);
+         err = -1;
+         break;
+       }
 
       /* Do wait.  */
       rt.tv_sec = sec;
@@ -77,21 +83,29 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
       /* Enable asynchronous cancellation.  Required by the standard.  */
       int oldtype = __pthread_enable_asynccancel ();
 
-      err = lll_futex_timed_wait (futex, 0, &rt);
+      err = lll_futex_timed_wait (&isem->value, 0, &rt,
+                                 isem->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (oldtype);
 
       if (err != 0 && err != -EWOULDBLOCK)
-       goto error_return;
+       {
+         __set_errno (-err);
+         err = -1;
+         break;
+       }
 
-      val = atomic_decrement_if_positive (futex);
+      if (atomic_decrement_if_positive (&isem->value) > 0)
+       {
+         err = 0;
+         break;
+       }
     }
-  while (val <= 0);
 
-  return 0;
+  pthread_cleanup_pop (0);
+
+  atomic_decrement (&isem->nwaiters);
 
- error_return:
-  __set_errno (-err);
-  return -1;
+  return err;
 }
index 416d863..e661e09 100644 (file)
@@ -1,5 +1,5 @@
 /* sem_wait -- wait on a semaphore.  Generic futex-using version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
 #include <lowlevellock.h>
 #include <internaltypes.h>
 #include <semaphore.h>
+
 #include <pthreadP.h>
 
 
+void
+attribute_hidden
+__sem_wait_cleanup (void *arg)
+{
+  struct new_sem *isem = (struct new_sem *) arg;
+
+  atomic_decrement (&isem->nwaiters);
+}
+
+
 int
 __new_sem_wait (sem_t *sem)
 {
-  /* First check for cancellation.  */
-  CANCELLATION_P (THREAD_SELF);
-
-  int *futex = (int *) sem;
+  struct new_sem *isem = (struct new_sem *) sem;
   int err;
 
-  do
-    {
-      if (atomic_decrement_if_positive (futex) > 0)
-       return 0;
+  if (atomic_decrement_if_positive (&isem->value) > 0)
+    return 0;
 
+  atomic_increment (&isem->nwaiters);
+
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+  while (1)
+    {
       /* Enable asynchronous cancellation.  Required by the standard.  */
       int oldtype = __pthread_enable_asynccancel ();
 
-      err = lll_futex_wait (futex, 0);
+      err = lll_futex_wait (&isem->value, 0,
+                           isem->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (oldtype);
+
+      if (err != 0 && err != -EWOULDBLOCK)
+       {
+         __set_errno (-err);
+         err = -1;
+         break;
+       }
+
+      if (atomic_decrement_if_positive (&isem->value) > 0)
+       {
+         err = 0;
+         break;
+       }
     }
-  while (err == 0 || err == -EWOULDBLOCK);
 
-  __set_errno (-err);
-  return -1;
+  pthread_cleanup_pop (0);
+
+  atomic_decrement (&isem->nwaiters);
+
+  return err;
 }
 weak_alias(__new_sem_wait, sem_wait)
index 9bd7569..940bd62 100644 (file)
@@ -7,8 +7,8 @@
 
 libpthread_SSRC = pt-vfork.S pthread_once.S pthread_rwlock_wrlock.S \
                        pthread_rwlock_rdlock.S pthread_rwlock_unlock.S \
-                       lowlevellock.S pthread_barrier_wait.S pthread_cond_broadcast.S \
-                       pthread_cond_signal.S \
+                       lowlevellock.S lowlevelrobustlock.S pthread_barrier_wait.S \
+                       pthread_cond_broadcast.S pthread_cond_signal.S \
                        pthread_rwlock_timedwrlock.S pthread_rwlock_timedrdlock.S \
                        sem_post.S sem_timedwait.S sem_trywait.S sem_wait.S
 
@@ -43,6 +43,7 @@ ASFLAGS-sem_wait.S = -D_LIBC_REENTRANT -DUSE___THREAD
 ASFLAGS-libc-lowlevellock.S = -D_LIBC_REENTRANT  -DUSE___THREAD
 
 ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevelrobustlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
 
 ASFLAGS-clone.S = -D_LIBC_REENTRANT
 ASFLAGS-vfork.S = -D_LIBC_REENTRANT
index 969686d..badcda5 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -20,6 +21,8 @@
 #ifndef _BITS_PTHREADTYPES_H
 #define _BITS_PTHREADTYPES_H   1
 
+#include <endian.h>
+
 #define __SIZEOF_PTHREAD_ATTR_T 36
 #define __SIZEOF_PTHREAD_MUTEX_T 24
 #define __SIZEOF_PTHREAD_MUTEXATTR_T 4
@@ -127,9 +130,21 @@ typedef union
     unsigned int __writer_wakeup;
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+    unsigned char __pad1;
+    unsigned char __pad2;
+    unsigned char __shared;
+    /* FLAGS must stay at this position in the structure to maintain
+       binary compatibility.  */
+    unsigned char __flags;
+#else
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
+    unsigned char __shared;
+    unsigned char __pad1;
+    unsigned char __pad2;
+#endif
     pthread_t __writer;
   } __data;
   char __size[__SIZEOF_PTHREAD_RWLOCK_T];
index e6c5d84..934493c 100644 (file)
@@ -28,9 +28,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
index dcedd26..6868b9b 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <sched.h>
 #include <signal.h>
-#include <stdio.h>
 #include <sysdep.h>
 #include <tls.h>
 
index 94a24b4..feb8211 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
index 062ce28..c702836 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
        mov.l   reg, mem; \
 99:    mov     r1, r15
 
-#define        XADD(reg, mem, new, old) \
+#define        XADD(reg, mem, old, tmp) \
        .align  2; \
        mova    99f, r0; \
        nop; \
        mov     r15, r1; \
-       mov     _IMM4, r15; \
+       mov     _IMM8, r15; \
 98:    mov.l   mem, old; \
-       mov     old, new; \
-       add     reg, new; \
-       mov.l   new, mem; \
+       mov     reg, tmp; \
+       add     old, tmp; \
+       mov.l   tmp, mem; \
 99:    mov     r1, r15
 
 #define        XCHG(reg, mem, old) \
index a5c916b..6d40364 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
 #include "lowlevel-atomic.h"
 
        .text
 
-#define SYS_gettimeofday       __NR_gettimeofday
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+       mov     #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg; \
+       extu.b  reg, reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+       mov     #(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg; \
+       extu.b  reg, reg
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+       mov     #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+       extu.b  tmp, tmp; \
+       xor     tmp, reg
+# define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \
+       mov     #(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG), tmp; \
+       extu.b  tmp, tmp; \
+       mov     #(FUTEX_CLOCK_REALTIME >> 8), tmp2; \
+       swap.b  tmp2, tmp2; \
+       or      tmp2, tmp; \
+       xor     tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+       mov     #(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), tmp; \
+       extu.b  tmp, tmp; \
+       xor     tmp, reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, reg        ; \
+       add     reg, tmp        ; \
+       bra     98f             ; \
+        mov.l  @tmp, reg       ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, reg        ; \
+       add     reg, tmp        ; \
+       mov.l   @tmp, reg       ; \
+       bra     98f             ; \
+        mov    #FUTEX_WAIT, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    or      tmp, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, reg        ; \
+       add     reg, tmp        ; \
+       mov.l   @tmp, reg       ; \
+       bra     98f             ; \
+        mov    #FUTEX_WAKE, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    or      tmp, reg
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, tmp2       ; \
+       add     tmp2, tmp       ; \
+       mov.l   @tmp, tmp2      ; \
+       bra     98f             ; \
+        mov    #FUTEX_PRIVATE_FLAG, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    extu.b  tmp, tmp        ; \
+       xor     tmp, reg        ; \
+       and     tmp2, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, tmp2       ; \
+       add     tmp2, tmp       ; \
+       mov.l   @tmp, tmp2      ; \
+       bra     98f             ; \
+        mov    #FUTEX_PRIVATE_FLAG, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    extu.b  tmp, tmp        ; \
+       xor     tmp, reg        ; \
+       and     tmp2, reg       ; \
+       mov     #FUTEX_WAIT, tmp ; \
+       or      tmp, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, tmp2       ; \
+       add     tmp2, tmp       ; \
+       mov.l   @tmp, tmp2      ; \
+       bra     98f             ; \
+        mov    #FUTEX_PRIVATE_FLAG, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    extu.b  tmp, tmp        ; \
+       xor     tmp, reg        ; \
+       and     tmp2, reg       ; \
+       mov     #FUTEX_WAIT_BITSET, tmp ; \
+       mov     #(FUTEX_CLOCK_REALTIME >> 8), tmp2; \
+       swap.b  tmp2, tmp2; \
+       or      tmp2, tmp; \
+       or      tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, tmp2       ; \
+       add     tmp2, tmp       ; \
+       mov.l   @tmp, tmp2      ; \
+       bra     98f             ; \
+        mov    #FUTEX_PRIVATE_FLAG, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    extu.b  tmp, tmp        ; \
+       xor     tmp, reg        ; \
+       and     tmp2, reg       ; \
+       mov     #FUTEX_WAKE, tmp ; \
+       or      tmp, reg
+#endif
 
-       .globl  __lll_mutex_lock_wait
-       .type   __lll_mutex_lock_wait,@function
-       .hidden __lll_mutex_lock_wait
+       .globl  __lll_lock_wait_private
+       .type   __lll_lock_wait_private,@function
+       .hidden __lll_lock_wait_private
        .align  5
-       /* void __lll_mutex_lock_wait (int val, int *__futex) */
-__lll_mutex_lock_wait:
-       mov     #2, r6
-       cmp/eq  r4, r6
-       mov     r5, r4
+       cfi_startproc
+__lll_lock_wait_private:
+       mov.l   r8, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r8, 0)
+       mov     r4, r6
+       mov     r5, r8
        mov     #0, r7          /* No timeout.  */
-       mov     #FUTEX_WAIT, r5
+       LOAD_PRIVATE_FUTEX_WAIT (r5, r0, r1)
 
+       mov     #2, r4
+       cmp/eq  r4, r6
        bf      2f
 
 1:
+       mov     r8, r4
        mov     #SYS_futex, r3
        extu.b  r3, r3
        trapa   #0x14
        SYSCALL_INST_PAD
 
 2:
-       XCHG (r6, @r4, r2)
+       mov     #2, r6
+       XCHG (r6, @r8, r2)
        tst     r2, r2
        bf      1b
 
+       mov.l   @r15+, r8
        rts
-        nop
-       .size   __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+        mov    r2, r0
+       cfi_endproc
+       .size   __lll_lock_wait_private,.-__lll_lock_wait_private
 
 #ifdef NOT_IN_libc
-       .globl  __lll_mutex_timedlock_wait
-       .type   __lll_mutex_timedlock_wait,@function
-       .hidden __lll_mutex_timedlock_wait
+       .globl  __lll_lock_wait
+       .type   __lll_lock_wait,@function
+       .hidden __lll_lock_wait
        .align  5
-       /* int __lll_mutex_timedlock_wait (int val, int *__futex,
-                                      const struct timespec *abstime) */
-__lll_mutex_timedlock_wait:
+       cfi_startproc
+__lll_lock_wait:
+       mov.l   r9, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r9, 0)
+       mov.l   r8, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r8, 0)
+       mov     r6, r9
+       mov     r4, r6
+       mov     r5, r8
+       mov     #0, r7          /* No timeout.  */
+       mov     r9, r5
+       LOAD_FUTEX_WAIT (r5, r0, r1)
+
+       mov     #2, r4
+       cmp/eq  r4, r6
+       bf      2f
+
+1:
+       mov     r8, r4
+       mov     #SYS_futex, r3
+       extu.b  r3, r3
+       trapa   #0x14
+       SYSCALL_INST_PAD
+
+2:
+       mov     #2, r6
+       XCHG (r6, @r8, r2)
+       tst     r2, r2
+       bf      1b
+
+       mov.l   @r15+, r8
+       mov.l   @r15+, r9
+       ret
+        mov    r2, r0
+       cfi_endproc
+       .size   __lll_lock_wait,.-__lll_lock_wait
+
+       /*      r5  (r8): futex
+               r7 (r11): flags
+               r6  (r9): timeout
+               r4 (r10): futex value
+       */
+       .globl  __lll_timedlock_wait
+       .type   __lll_timedlock_wait,@function
+       .hidden __lll_timedlock_wait
+       .align  5
+       cfi_startproc
+__lll_timedlock_wait:
+       mov.l   r12, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r12, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       mov.l   .Lhave, r1
+#  ifdef PIC
+       mova    .Lgot, r0
+       mov.l   .Lgot, r12
+       add     r0, r12
+       add     r12, r1
+#  endif
+       mov.l   @r1, r0
+       tst     r0, r0
+       bt      .Lreltmo
+# endif
+
+       mov     r4, r2
+       mov     r5, r4
+       mov     r7, r5
+       mov     r6, r7
+       LOAD_FUTEX_WAIT_ABS (r5, r0, r1)
+
+       mov     #2, r6
+       cmp/eq  r6, r2
+       bf/s    2f
+        mov    r6, r2
+
+1:
+       mov     #2, r6
+       mov     #-1, r1
+       mov     #SYS_futex, r3
+       extu.b  r3, r3
+       trapa   #0x16
+       SYSCALL_INST_PAD
+       mov     r0, r6
+
+2:
+       XCHG    (r2, @r4, r3)   /* NB:   lock is implied */
+
+       tst     r3, r3
+       bt/s    3f
+        mov    r6, r0
+
+       cmp/eq  #-ETIMEDOUT, r0
+       bt      4f
+       cmp/eq  #-EINVAL, r0
+       bf      1b
+4:
+       neg     r0, r3
+3:
+       mov     r3, r0
+       rts
+        mov.l  @r15+, r12
+
+       .align  2
+# ifdef PIC
+.Lgot:
+       .long   _GLOBAL_OFFSET_TABLE_
+.Lhave:
+       .long   __have_futex_clock_realtime@GOTOFF
+# else
+.Lhave:
+       .long   __have_futex_clock_realtime
+# endif
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
        /* Check for a valid timeout value.  */
        mov.l   @(4,r6), r1
        mov.l   .L1g, r0
        cmp/hs  r0, r1
        bt      3f
 
+       mov.l   r11, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r11, 0)
+       mov.l   r10, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r10, 0)
        mov.l   r9, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r9, 0)
        mov.l   r8, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r8, 0)
+       mov     r7, r11
+       mov     r4, r10
        mov     r6, r9
        mov     r5, r8
 
        /* Stack frame for the timespec and timeval structs.  */
        add     #-8, r15
+       cfi_adjust_cfa_offset(8)
+
+       mov     #2, r2
+       XCHG (r2, @r8, r3)
+
+       tst     r3, r3
+       bt      6f
 
 1:
        /* Get current time.  */
        mov     r15, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -105,56 +353,49 @@ __lll_mutex_timedlock_wait:
        add     #-1, r2
 4:
        cmp/pz  r2
-       bf      5f              /* Time is already up.  */
+       bf      2f              /* Time is already up.  */
 
        mov.l   r2, @r15        /* Store relative timeout.  */
        mov.l   r3, @(4,r15)
 
-       mov     #1, r3
-       mov     #2, r6
-       CMPXCHG (r3, @r8, r6, r2)
-       tst     r2, r2
-       bt      8f
-
        mov     r8, r4
-       mov     #FUTEX_WAIT, r5
+       mov     r11, r5
+       LOAD_FUTEX_WAIT (r5, r0, r1)
+       mov     r10, r6
        mov     r15, r7
        mov     #SYS_futex, r3
        extu.b  r3, r3
        trapa   #0x14
        SYSCALL_INST_PAD
-       mov     r0, r4
+       mov     r0, r5
 
-8:
-       mov     #0, r3
-       CMPXCHG (r3, @r8, r6, r2)
-       bf/s    7f
-       mov     #0, r0
+       mov     #2, r2
+       XCHG (r2, @r8, r3)
+
+       tst     r3, r3
+       bt/s    6f
+        mov    #-ETIMEDOUT, r1
+       cmp/eq  r5, r1
+       bf      1b
+
+2:     mov     #ETIMEDOUT, r3
 
 6:
+       mov     r3, r0
        add     #8, r15
        mov.l   @r15+, r8
-       rts
        mov.l   @r15+, r9
-7:
-       /* Check whether the time expired.  */
-       mov     #-ETIMEDOUT, r1
-       cmp/eq  r5, r1
-       bt      5f
+       mov.l   @r15+, r10
+       mov.l   @r15+, r11
+       rts
+        mov.l  @r15+, r12
 
-       /* Make sure the current holder knows we are going to sleep.  */
-       XCHG (r2, @r8, r3)
-       tst     r3, r3
-       bt/s    6b
-       mov     #0, r0
-       bra     1b
-       nop
 3:
+       mov.l   @r15+, r12
        rts
-       mov     #EINVAL, r0
-5:
-       bra     6b
-       mov     #ETIMEDOUT, r0
+        mov    #EINVAL, r0
+# endif
+       cfi_endproc
 
 .L1k:
        .word   1000
@@ -162,21 +403,16 @@ __lll_mutex_timedlock_wait:
 .L1g:
        .long   1000000000
 
-       .size   __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+       .size   __lll_timedlock_wait,.-__lll_timedlock_wait
 #endif
 
-
-#ifdef NOT_IN_libc
-       .globl  lll_unlock_wake_cb
-       .type   lll_unlock_wake_cb,@function
-       .hidden lll_unlock_wake_cb
+       .globl  __lll_unlock_wake_private
+       .type   __lll_unlock_wake_private,@function
+       .hidden __lll_unlock_wake_private
        .align  5
-lll_unlock_wake_cb:
-       DEC     (@r4, r2)
-       tst     r2, r2
-       bt      1f
-
-       mov     #FUTEX_WAKE, r5
+       cfi_startproc
+__lll_unlock_wake_private:
+       LOAD_PRIVATE_FUTEX_WAKE (r5, r0, r1)
        mov     #1, r6          /* Wake one thread.  */
        mov     #0, r7
        mov.l   r7, @r4         /* Stores 0.  */
@@ -184,21 +420,19 @@ lll_unlock_wake_cb:
        extu.b  r3, r3
        trapa   #0x14
        SYSCALL_INST_PAD
-
-1:
        rts
-       nop
-       .size   lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+        nop
+       cfi_endproc
+       .size   __lll_unlock_wake_private,.-__lll_unlock_wake_private
 
-       .globl  __lll_mutex_unlock_wake
-       .type   __lll_mutex_unlock_wake,@function
-       .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+       .globl  __lll_unlock_wake
+       .type   __lll_unlock_wake,@function
+       .hidden __lll_unlock_wake
        .align  5
-       /* void __lll_mutex_unlock_wake(int *__futex) */
-__lll_mutex_unlock_wake:
-       mov     #FUTEX_WAKE, r5
+       cfi_startproc
+__lll_unlock_wake:
+       LOAD_FUTEX_WAKE (r5, r0, r1)
        mov     #1, r6          /* Wake one thread.  */
        mov     #0, r7
        mov.l   r7, @r4         /* Stores 0.  */
@@ -207,29 +441,34 @@ __lll_mutex_unlock_wake:
        trapa   #0x14
        SYSCALL_INST_PAD
        rts
-       nop
-       .size   __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
-
+        nop
+       cfi_endproc
+       .size   __lll_unlock_wake,.-__lll_unlock_wake
 
-#ifdef NOT_IN_libc
        .globl  __lll_timedwait_tid
        .type   __lll_timedwait_tid,@function
        .hidden __lll_timedwait_tid
        .align  5
+       cfi_startproc
 __lll_timedwait_tid:
        mov.l   r9, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r9, 0)
        mov.l   r8, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r8, 0)
        mov     r4, r8
        mov     r5, r9
 
        /* Stack frame for the timespec and timeval structs.  */
        add     #-8, r15
+       cfi_adjust_cfa_offset(8)
 
 2:
        /* Get current time.  */
        mov     r15, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -260,7 +499,10 @@ __lll_timedwait_tid:
        bt      4f
 
        mov     r8, r4
-       mov     #FUTEX_WAIT, r5
+       /* XXX The kernel so far uses global futex for the wakeup at
+          all times.  */
+       mov     #0, r5
+       extu.b  r5, r5
        mov     r2, r6
        mov     r15, r7
        mov     #SYS_futex, r3
@@ -277,7 +519,7 @@ __lll_timedwait_tid:
        add     #8, r15
        mov.l   @r15+, r8
        rts
-       mov.l   @r15+, r9
+        mov.l  @r15+, r9
 1:
        /* Check whether the time expired.  */
        mov     #-ETIMEDOUT, r1
@@ -285,7 +527,8 @@ __lll_timedwait_tid:
        bf      2b
 6:
        bra     3b
-       mov     #ETIMEDOUT, r0
+        mov    #ETIMEDOUT, r0
+       cfi_endproc
 
 .L1k2:
        .word   1000
index 45339f5..d7fada9 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #ifndef _LOWLEVELLOCK_H
 #define _LOWLEVELLOCK_H        1
 
-#include <syscall.h>
+#ifndef __ASSEMBLER__
 #include <time.h>
 #include <sys/param.h>
 #include <bits/pthreadtypes.h>
+#include <bits/kernel-features.h>
+#endif
 
+#define SYS_futex              240
 #define FUTEX_WAIT             0
 #define FUTEX_WAKE             1
+#define FUTEX_CMP_REQUEUE      4
+#define FUTEX_WAKE_OP          5
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
+             & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
 
+#ifndef __ASSEMBLER__
 
 /* Initializer for compatibility lock.  */
-#define LLL_MUTEX_LOCK_INITIALIZER             (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED      (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS     (2)
-
-extern int __lll_mutex_lock_wait (int val, int *__futex) attribute_hidden;
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
-                                      const struct timespec *abstime)
-     attribute_hidden;
-extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
-
+#define LLL_LOCK_INITIALIZER           (0)
+#define LLL_LOCK_INITIALIZER_LOCKED    (1)
+#define LLL_LOCK_INITIALIZER_WAITERS   (2)
+
+extern int __lll_lock_wait_private (int val, int *__futex)
+  attribute_hidden;
+extern int __lll_lock_wait (int val, int *__futex, int private)
+  attribute_hidden;
+extern int __lll_timedlock_wait (int val, int *__futex,
+                                const struct timespec *abstime, int private)
+  attribute_hidden;
+extern int __lll_robust_lock_wait (int val, int *__futex, int private)
+  attribute_hidden;
+extern int __lll_robust_timedlock_wait (int val, int *__futex,
+                                       const struct timespec *abstime,
+                                       int private)
+  attribute_hidden;
+extern int __lll_unlock_wake_private (int *__futex) attribute_hidden;
+extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
+
+#define lll_trylock(futex) \
+  ({ unsigned char __result; \
+     __asm __volatile ("\
+       .align 2\n\
+       mova 1f,r0\n\
+       nop\n\
+       mov r15,r1\n\
+       mov #-8,r15\n\
+     0: mov.l @%1,r2\n\
+       cmp/eq r2,%3\n\
+       bf 1f\n\
+       mov.l %2,@%1\n\
+     1: mov r1,r15\n\
+       mov #-1,%0\n\
+       negc %0,%0"\
+       : "=r" (__result) \
+       : "r" (&(futex)), \
+         "r" (LLL_LOCK_INITIALIZER_LOCKED), \
+         "r" (LLL_LOCK_INITIALIZER) \
+       : "r0", "r1", "r2", "t", "memory"); \
+     __result; })
 
-#define lll_mutex_trylock(futex) \
+#define lll_robust_trylock(futex, id)  \
   ({ unsigned char __result; \
      __asm __volatile ("\
        .align 2\n\
@@ -57,12 +137,12 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
        negc %0,%0"\
        : "=r" (__result) \
        : "r" (&(futex)), \
-         "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), \
-         "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+         "r" (id), \
+         "r" (LLL_LOCK_INITIALIZER) \
        : "r0", "r1", "r2", "t", "memory"); \
      __result; })
 
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
   ({ unsigned char __result; \
      __asm __volatile ("\
        .align 2\n\
@@ -79,12 +159,12 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
        negc %0,%0"\
        : "=r" (__result) \
        : "r" (&(futex)), \
-         "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
-         "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+         "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+         "r" (LLL_LOCK_INITIALIZER) \
        : "r0", "r1", "r2", "t", "memory"); \
      __result; })
 
-#define lll_mutex_lock(futex) \
+#define lll_lock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
            __asm __volatile ("\
                .align 2\n\
@@ -100,11 +180,37 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
                : "=&r" (__result) : "r" (1), "r" (__futex) \
                : "r0", "r1", "t", "memory"); \
            if (__result) \
-             __lll_mutex_lock_wait (__result, __futex); })
+             { \
+               if (__builtin_constant_p (private) \
+                   && (private) == LLL_PRIVATE) \
+                 __lll_lock_wait_private (__result, __futex); \
+               else \
+                 __lll_lock_wait (__result, __futex, (private));       \
+             } \
+    })
+
+#define lll_robust_lock(futex, id, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+       .align 2\n\
+       mova 1f,r0\n\
+       nop\n\
+       mov r15,r1\n\
+       mov #-8,r15\n\
+      0: mov.l @%2,%0\n\
+       tst %0,%0\n\
+       bf 1f\n\
+       mov.l %1,@%2\n\
+      1: mov r1,r15"\
+       : "=&r" (__result) : "r" (id), "r" (__futex) \
+       : "r0", "r1", "t", "memory"); \
+     if (__result) \
+       __result = __lll_robust_lock_wait (__result, __futex, private); \
+     __result; })
 
 /* Special version of lll_mutex_lock which causes the unlock function to
    always wakeup waiters.  */
-#define lll_mutex_cond_lock(futex) \
+#define lll_cond_lock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
            __asm __volatile ("\
                .align 2\n\
@@ -120,9 +226,28 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
                : "=&r" (__result) : "r" (2), "r" (__futex) \
                : "r0", "r1", "t", "memory"); \
            if (__result) \
-             __lll_mutex_lock_wait (__result, __futex); })
+             __lll_lock_wait (__result, __futex, private); })
 
-#define lll_mutex_timedlock(futex, timeout) \
+#define lll_robust_cond_lock(futex, id, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+       .align 2\n\
+       mova 1f,r0\n\
+       nop\n\
+       mov r15,r1\n\
+       mov #-8,r15\n\
+     0: mov.l @%2,%0\n\
+       tst %0,%0\n\
+       bf 1f\n\
+       mov.l %1,@%2\n\
+     1: mov r1,r15"\
+       : "=&r" (__result) : "r" (id | FUTEX_WAITERS), "r" (__futex) \
+       : "r0", "r1", "t", "memory"); \
+      if (__result) \
+       __result = __lll_robust_lock_wait (__result, __futex, private); \
+      __result; })
+
+#define lll_timedlock(futex, timeout, private) \
   ({ int __result, *__futex = &(futex); \
      __asm __volatile ("\
        .align 2\n\
@@ -138,10 +263,30 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
        : "=&r" (__result) : "r" (1), "r" (__futex) \
        : "r0", "r1", "t", "memory"); \
     if (__result) \
-      __result = __lll_mutex_timedlock_wait (__result, __futex, timeout); \
+      __result = __lll_timedlock_wait (__result, __futex, timeout, private); \
     __result; })
 
-#define lll_mutex_unlock(futex) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+       .align 2\n\
+       mova 1f,r0\n\
+       nop\n\
+       mov r15,r1\n\
+       mov #-8,r15\n\
+     0: mov.l @%2,%0\n\
+       tst %0,%0\n\
+       bf 1f\n\
+       mov.l %1,@%2\n\
+     1: mov r1,r15"\
+       : "=&r" (__result) : "r" (id), "r" (__futex) \
+       : "r0", "r1", "t", "memory"); \
+    if (__result) \
+      __result = __lll_robust_timedlock_wait (__result, __futex, \
+                                             timeout, private); \
+    __result; })
+
+#define lll_unlock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
            __asm __volatile ("\
                .align 2\n\
@@ -155,22 +300,45 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
                : "=&r" (__result) : "r" (__futex) \
                : "r0", "r1", "memory"); \
            if (__result) \
-             __lll_mutex_unlock_wake (__futex); })
-
-#define lll_mutex_islocked(futex) \
-  (futex != 0)
-
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
-/* Initializers for lock.  */
-#define LLL_LOCK_INITIALIZER           (0)
-#define LLL_LOCK_INITIALIZER_LOCKED    (1)
+             { \
+               if (__builtin_constant_p (private) \
+                   && (private) == LLL_PRIVATE) \
+                 __lll_unlock_wake_private (__futex); \
+               else \
+                 __lll_unlock_wake (__futex, (private)); \
+             } \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  (void) ({ int __result, *__futex = &(futex); \
+           __asm __volatile ("\
+               .align 2\n\
+               mova 1f,r0\n\
+               mov r15,r1\n\
+               mov #-6,r15\n\
+            0: mov.l @%1,%0\n\
+               and %2,%0\n\
+               mov.l %0,@%1\n\
+            1: mov r1,r15"\
+               : "=&r" (__result) : "r" (__futex), "r" (FUTEX_WAITERS) \
+               : "r0", "r1", "memory");        \
+           if (__result) \
+             __lll_unlock_wake (__futex, private); })
 
+#define lll_robust_dead(futex, private)                       \
+  (void) ({ int __ignore, *__futex = &(futex); \
+           __asm __volatile ("\
+               .align 2\n\
+               mova 1f,r0\n\
+               mov r15,r1\n\
+               mov #-6,r15\n\
+            0: mov.l @%1,%0\n\
+               or %2,%0\n\
+               mov.l %0,@%1\n\
+            1: mov r1,r15"\
+               : "=&r" (__ignore) : "r" (__futex), "r" (FUTEX_OWNER_DIED) \
+               : "r0", "r1", "memory");        \
+           lll_futex_wake (__futex, 1, private); })
 
 # ifdef NEED_SYSCALL_INST_PAD
 #  define SYSCALL_WITH_INST_PAD "\
@@ -180,27 +348,17 @@ typedef int lll_lock_t;
        trapa #0x14"
 # endif
 
-#define lll_futex_wait(futex, val) \
-  do {                                                                       \
-    int __ignore;                                                            \
-    register unsigned long __r3 __asm ("r3") = SYS_futex;                            \
-    register unsigned long __r4 __asm ("r4") = (unsigned long) (futex);              \
-    register unsigned long __r5 __asm ("r5") = FUTEX_WAIT;                   \
-    register unsigned long __r6 __asm ("r6") = (unsigned long) (val);        \
-    register unsigned long __r7 __asm ("r7") = 0;                                    \
-    __asm __volatile (SYSCALL_WITH_INST_PAD                                  \
-                     : "=z" (__ignore)                                       \
-                     : "r" (__r3), "r" (__r4), "r" (__r5),                   \
-                       "r" (__r6), "r" (__r7)                                \
-                     : "memory", "t");                                       \
-  } while (0)
+#define lll_futex_wait(futex, val, private) \
+  lll_futex_timed_wait (futex, val, NULL, private)
+
 
-#define lll_futex_timed_wait(futex, val, timeout) \
+#define lll_futex_timed_wait(futex, val, timeout, private) \
   ({                                                                         \
     int __status;                                                            \
     register unsigned long __r3 __asm ("r3") = SYS_futex;                            \
     register unsigned long __r4 __asm ("r4") = (unsigned long) (futex);              \
-    register unsigned long __r5 __asm ("r5") = FUTEX_WAIT;                   \
+    register unsigned long __r5 __asm ("r5")                                 \
+      = __lll_private_flag (FUTEX_WAIT, private);                            \
     register unsigned long __r6 __asm ("r6") = (unsigned long) (val);        \
     register unsigned long __r7 __asm ("r7") = (timeout);                            \
     __asm __volatile (SYSCALL_WITH_INST_PAD                                  \
@@ -212,12 +370,13 @@ typedef int lll_lock_t;
   })
 
 
-#define lll_futex_wake(futex, nr) \
+#define lll_futex_wake(futex, nr, private) \
   do {                                                                       \
     int __ignore;                                                            \
     register unsigned long __r3 __asm ("r3") = SYS_futex;                            \
     register unsigned long __r4 __asm ("r4") = (unsigned long) (futex);              \
-    register unsigned long __r5 __asm ("r5") = FUTEX_WAKE;                   \
+    register unsigned long __r5 __asm ("r5")                                 \
+      = __lll_private_flag (FUTEX_WAKE, private);                            \
     register unsigned long __r6 __asm ("r6") = (unsigned long) (nr);         \
     register unsigned long __r7 __asm ("r7") = 0;                                    \
     __asm __volatile (SYSCALL_WITH_INST_PAD                                  \
@@ -228,33 +387,19 @@ typedef int lll_lock_t;
   } while (0)
 
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-    2  -  taken by more users */
-
-#define lll_trylock(futex) lll_mutex_trylock (futex)
-#define lll_lock(futex) lll_mutex_lock (futex)
-#define lll_unlock(futex) lll_mutex_unlock (futex)
-
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
-
 /* The kernel notifies a process with uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
    afterwards.  */
 
-extern int __lll_wait_tid (int *tid) attribute_hidden;
 #define lll_wait_tid(tid) \
   do {                                                                       \
     __typeof (tid) *__tid = &(tid);                                          \
     while (*__tid != 0)                                                              \
-      lll_futex_wait (__tid, *__tid);                                        \
+      lll_futex_wait (__tid, *__tid, LLL_SHARED);                            \
   } while (0)
 
 extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
@@ -271,24 +416,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
       }                                                                              \
     __result; })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
+#endif  /* !__ASSEMBLER__ */
 
 #endif  /* lowlevellock.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..dab1ae4
--- /dev/null
@@ -0,0 +1,264 @@
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <bits/kernel-features.h>
+#include "lowlevel-atomic.h"
+
+       .text
+
+#define FUTEX_WAITERS          0x80000000
+#define FUTEX_OWNER_DIED       0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+       mov     #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+       extu.b  tmp, tmp; \
+       xor     tmp, reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, tmp2       ; \
+       add     tmp2, tmp       ; \
+       mov.l   @tmp, tmp2      ; \
+       bra     98f             ; \
+        mov    #FUTEX_PRIVATE_FLAG, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    extu.b  tmp, tmp        ; \
+       xor     tmp, reg        ; \
+       and     tmp2, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+       stc     gbr, tmp        ; \
+       mov.w   99f, tmp2       ; \
+       add     tmp2, tmp       ; \
+       mov.l   @tmp, tmp2      ; \
+       bra     98f             ; \
+        mov    #FUTEX_PRIVATE_FLAG, tmp ; \
+99:    .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:    extu.b  tmp, tmp        ; \
+       xor     tmp, reg        ; \
+       and     tmp2, reg       ; \
+       mov     #FUTEX_WAIT, tmp ; \
+       or      tmp, reg
+# endif
+#endif
+
+       .globl  __lll_robust_lock_wait
+       .type   __lll_robust_lock_wait,@function
+       .hidden __lll_robust_lock_wait
+       .align  5
+       cfi_startproc
+__lll_robust_lock_wait:
+       mov.l   r8, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r8, 0)
+       mov     r5, r8
+       mov     #0, r7          /* No timeout.  */
+       mov     r6, r5
+       LOAD_FUTEX_WAIT (r5, r0, r1)
+
+4:
+       mov     r4, r6
+       mov.l   .L_FUTEX_WAITERS, r0
+       or      r0, r6
+       shlr    r0              /* r0 = FUTEX_OWNER_DIED */
+       tst     r0, r4
+       bf/s    3f
+        cmp/eq r4, r6
+       bt      1f
+
+       CMPXCHG (r4, @r8, r6, r2)
+       bf      2f
+
+1:
+       mov     r8, r4
+       mov     #SYS_futex, r3
+       extu.b  r3, r3
+       trapa   #0x14
+       SYSCALL_INST_PAD
+
+       mov.l   @r8, r2
+
+2:
+       tst     r2, r2
+       bf/s    4b
+        mov    r2, r4
+
+       stc     gbr, r1
+       mov.w   .Ltidoff, r2
+       add     r2, r1
+       mov.l   @r1, r6
+       mov     #0, r3
+       CMPXCHG (r3, @r8, r6, r4)
+       bf      4b
+       mov     #0, r4
+
+3:
+       mov.l   @r15+, r8
+       ret
+        mov    r4, r0
+       cfi_endproc
+       .align  2
+.L_FUTEX_WAITERS:
+       .long   FUTEX_WAITERS
+.Ltidoff:
+       .word   TID - TLS_PRE_TCB_SIZE
+       .size   __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+       .globl  __lll_robust_timedlock_wait
+       .type   __lll_robust_timedlock_wait,@function
+       .hidden __lll_robust_timedlock_wait
+       .align  5
+       cfi_startproc
+__lll_robust_timedlock_wait:
+       /* Check for a valid timeout value.  */
+       mov.l   @(4,r6), r1
+       mov.l   .L1g, r0
+       cmp/hs  r0, r1
+       bt      3f
+
+       mov.l   r11, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r11, 0)
+       mov.l   r10, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r10, 0)
+       mov.l   r9, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r9, 0)
+       mov.l   r8, @-r15
+       cfi_adjust_cfa_offset(4)
+       cfi_rel_offset (r8, 0)
+       mov     r7, r11
+       mov     r4, r10
+       mov     r6, r9
+       mov     r5, r8
+
+       /* Stack frame for the timespec and timeval structs.  */
+       add     #-8, r15
+       cfi_adjust_cfa_offset(8)
+
+1:
+       /* Get current time.  */
+       mov     r15, r4
+       mov     #0, r5
+       mov     #__NR_gettimeofday, r3
+       trapa   #0x12
+       SYSCALL_INST_PAD
+
+       /* Compute relative timeout.  */
+       mov.l   @(4,r15), r0
+       mov.w   .L1k, r1
+       dmulu.l r0, r1          /* Micro seconds to nano seconds.  */
+       mov.l   @r9, r2
+       mov.l   @(4,r9), r3
+       mov.l   @r15, r0
+       sts     macl, r1
+       sub     r0, r2
+       clrt
+       subc    r1, r3
+       bf      4f
+       mov.l   .L1g, r1
+       add     r1, r3
+       add     #-1, r2
+4:
+       cmp/pz  r2
+       bf      8f              /* Time is already up.  */
+
+       mov.l   r2, @r15        /* Store relative timeout.  */
+       mov.l   r3, @(4,r15)
+
+       mov     r10, r6
+       mov.l   .L_FUTEX_WAITERS2, r0
+       or      r0, r6
+       shlr    r0              /* r0 = FUTEX_OWNER_DIED */
+       tst     r0, r4
+       bf/s    6f
+        cmp/eq r4, r6
+       bt      2f
+
+       CMPXCHG (r4, @r8, r6, r2)
+       bf/s    5f
+        mov    #0, r5
+
+2:
+       mov     r8, r4
+       mov     r11, r5
+       LOAD_FUTEX_WAIT (r5, r0, r1)
+       mov     r10, r6
+       mov     r15, r7
+       mov     #SYS_futex, r3
+       extu.b  r3, r3
+       trapa   #0x14
+       SYSCALL_INST_PAD
+       mov     r0, r5
+
+       mov.l   @r8, r2
+
+5:
+       tst     r2, r2
+       bf/s    7f
+        mov    r2, r10
+
+       stc     gbr, r1
+       mov.w   .Ltidoff2, r2
+       add     r2, r1
+       mov.l   @r1, r4
+       mov     #0, r3
+       CMPXCHG (r3, @r8, r4, r10)
+       bf      7f
+       mov     #0, r0
+
+6:
+       add     #8, r15
+       mov.l   @r15+, r8
+       mov.l   @r15+, r9
+       mov.l   @r15+, r10
+       rts
+        mov.l  @r15+, r11
+
+7:
+       /* Check whether the time expired.  */
+       mov     #-ETIMEDOUT, r1
+       cmp/eq  r5, r1
+       bf      1b
+
+8:
+       bra     6b
+        mov    #ETIMEDOUT, r0
+3:
+       rts
+        mov    #EINVAL, r0
+       cfi_endproc
+       .align  2
+.L_FUTEX_WAITERS2:
+       .long   FUTEX_WAITERS
+.L1g:
+       .long   1000000000
+.Ltidoff2:
+       .word   TID - TLS_PRE_TCB_SIZE
+.L1k:
+       .word   1000
+       .size   __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
index eb83653..acf1a61 100644 (file)
@@ -1,82 +1 @@
-/* Uncancelable versions of cancelable interfaces.  Linux/NPTL version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
-
-#include <sysdep.h>
-
-#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
-extern int __open_nocancel (const char *, int, ...) attribute_hidden;
-extern int __close_nocancel (int) attribute_hidden;
-extern int __read_nocancel (int, void *, size_t) attribute_hidden;
-extern int __write_nocancel (int, const void *, size_t) attribute_hidden;
-extern pid_t __waitpid_nocancel (pid_t, int *, int) attribute_hidden;
-
-libc_hidden_proto(__open_nocancel)
-libc_hidden_proto(__close_nocancel)
-libc_hidden_proto(__read_nocancel)
-libc_hidden_proto(__write_nocancel)
-libc_hidden_proto(__waitpid_nocancel)
-
-#else
-#define __open_nocancel(name, ...) __open (name, __VA_ARGS__)
-#define __close_nocancel(fd) __close (fd)
-#define __read_nocancel(fd, buf, len) __read (fd, buf, len)
-#define __write_nocancel(fd, buf, len) __write (fd, buf, len)
-#define __waitpid_nocancel(pid, stat_loc, options) \
-  __waitpid (pid, stat_loc, options)
-#endif
-
-/* Uncancelable open.  */
-#define open_not_cancel(name, flags, mode) \
-   __open_nocancel (name, flags, mode)
-#define open_not_cancel_2(name, flags) \
-   __open_nocancel (name, flags)
-
-/* Uncancelable close.  */
-#define close_not_cancel(fd) \
-  __close_nocancel (fd)
-#define close_not_cancel_no_status(fd) \
-  (void) ({ INTERNAL_SYSCALL_DECL (err);                                     \
-           INTERNAL_SYSCALL (close, err, 1, (fd)); })
-
-/* Uncancelable read.  */
-#define read_not_cancel(fd, buf, n) \
-  __read_nocancel (fd, buf, n)
-
-/* Uncancelable write.  */
-#define write_not_cancel(fd, buf, n) \
-  __write_nocancel (fd, buf, n)
-
-/* Uncancelable writev.  */
-#define writev_not_cancel_no_status(fd, iov, n) \
-  (void) ({ INTERNAL_SYSCALL_DECL (err);                                     \
-           INTERNAL_SYSCALL (writev, err, 3, (fd), (iov), (n)); })
-
-/* Uncancelable fcntl.  */
-#define fcntl_not_cancel(fd, cmd, val) \
-  __fcntl_nocancel (fd, cmd, val)
-
-/* Uncancelable waitpid.  */
-#ifdef __NR_waitpid
-# define waitpid_not_cancel(pid, stat_loc, options) \
-  __waitpid_nocancel (pid, stat_loc, options)
-#else
-# define waitpid_not_cancel(pid, stat_loc, options) \
-  INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL)
-#endif
+#include "../i386/not-cancel.h"
index 5391d5c..82c97c3 100644 (file)
@@ -1,5 +1,5 @@
-/* Special .init and .fini section support for SH. NPTL version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+/* Special .init and .fini section support for SH.  NPTL version.
+   Copyright (C) 2003, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it
@@ -62,19 +62,12 @@ _init:\n\
        mov.l   .L24,r1\n\
        add     r0,r1\n\
        jsr     @r1\n\
-        nop\n\
-       mova    .L23,r0\n\
-       mov.l   .L23,r1\n\
-       add     r0,r1\n\
-       jsr     @r1\n\
         mov    r15,r14\n\
        bra     1f\n\
         nop\n\
        .align 2\n\
 .L22:\n\
        .long   _GLOBAL_OFFSET_TABLE_\n\
-.L23:\n\
-       .long   __gmon_start__@PLT\n\
 .L24:\n\
        .long   __pthread_initialize_minimal_internal@PLT\n\
 1:\n\
@@ -91,16 +84,6 @@ _init:\n\
        rts     \n\
        mov.l   @r15+,r12\n\
        END_INIT\n\
-       .section .text\n\
-       .align 5\n\
-       .weak   __gmon_start__\n\
-       .type   __gmon_start__,@function\n\
-__gmon_start__:\n\
-       mov.l   r14,@-r15\n\
-       mov     r15,r14\n\
-       mov     r14,r15\n\
-       rts     \n\
-       mov.l   @r15+,r14\n\
        \n\
 /*@_init_EPILOG_ENDS*/\n\
 \n\
index 608c736..4a6059a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelbarrier.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT     0
-#define FUTEX_WAKE     1
-
-
        .text
 
        .globl  pthread_barrier_wait
@@ -64,7 +61,13 @@ pthread_barrier_wait:
 #if CURR_EVENT != 0
        add     #CURR_EVENT, r4
 #endif
+#if FUTEX_WAIT == 0
+       mov.l   @(PRIVATE,r8), r5
+#else
        mov     #FUTEX_WAIT, r5
+       mov.l   @(PRIVATE,r8), r0
+       or      r0, r5
+#endif
        mov     #0, r7
 8:
        mov     #SYS_futex, r3
@@ -81,8 +84,10 @@ pthread_barrier_wait:
 
        /* Increment LEFT.  If this brings the count back to the
           initial count unlock the object.  */
-       INC     (@(LEFT,r8), r2)
+       mov     #1, r3
        mov.l   @(INIT_COUNT,r8), r4
+       XADD    (r3, @(LEFT,r8), r2, r5)
+       add     #-1, r4
        cmp/eq  r2, r4
        bf      10f
 
@@ -115,6 +120,8 @@ pthread_barrier_wait:
 #endif
        mov     #0, r7
        mov     #FUTEX_WAKE, r5
+       mov.l   @(PRIVATE,r8), r0
+       or      r0, r5
        mov     #SYS_futex, r3
        extu.b  r3, r3
        trapa   #0x14
@@ -122,8 +129,10 @@ pthread_barrier_wait:
 
        /* Increment LEFT.  If this brings the count back to the
           initial count unlock the object.  */
-       INC     (@(LEFT,r8), r2)
+       mov     #1, r3
        mov.l   @(INIT_COUNT,r8), r4
+       XADD    (r3, @(LEFT,r8), r2, r5)
+       add     #-1, r4
        cmp/eq  r2, r4
        bf      5f
 
@@ -139,6 +148,10 @@ pthread_barrier_wait:
         mov.l  @r15+, r9
 
 1:
+       mov.l   @(PRIVATE,r8), r6
+       mov     #LLL_SHARED, r0
+       extu.b  r0, r0
+       xor     r0, r6
        mov     r2, r4
        mov     r8, r5
        mov.l   .Lwait0, r1
@@ -149,6 +162,10 @@ pthread_barrier_wait:
         nop
 
 4:
+       mov.l   @(PRIVATE,r8), r5
+       mov     #LLL_SHARED, r0
+       extu.b  r0, r0
+       xor     r0, r5
        mov     r8, r4
        mov.l   .Lwake0, r1
        bsrf    r1
@@ -159,6 +176,10 @@ pthread_barrier_wait:
 
 6:
        mov     r6, r9
+       mov.l   @(PRIVATE,r8), r5
+       mov     #LLL_SHARED, r0
+       extu.b  r0, r0
+       xor     r0, r5
        mov     r8, r4
        mov.l   .Lwake1, r1
        bsrf    r1
@@ -167,8 +188,12 @@ pthread_barrier_wait:
        bra     7b
         mov    r9, r6
 
-9:
+9:     
        mov     r6, r9
+       mov.l   @(PRIVATE,r8), r5
+       mov     #LLL_SHARED, r0
+       extu.b  r0, r0
+       xor     r0, r5
        mov     r8, r4
        mov.l   .Lwake2, r1
        bsrf    r1
@@ -181,11 +206,11 @@ pthread_barrier_wait:
 .Lall:
        .long   0x7fffffff
 .Lwait0:
-       .long   __lll_mutex_lock_wait-.Lwait0b
+       .long   __lll_lock_wait-.Lwait0b
 .Lwake0:
-       .long   __lll_mutex_unlock_wake-.Lwake0b
+       .long   __lll_unlock_wake-.Lwake0b
 .Lwake1:
-       .long   __lll_mutex_unlock_wake-.Lwake1b
+       .long   __lll_unlock_wake-.Lwake1b
 .Lwake2:
-       .long   __lll_mutex_unlock_wake-.Lwake2b
+       .long   __lll_unlock_wake-.Lwake2b
        .size   pthread_barrier_wait,.-pthread_barrier_wait
index 36eccf1..3825124 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <bits/kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-#define FUTEX_REQUEUE          3
-#define FUTEX_CMP_REQUEUE      4
-
-#define EINVAL                 22
-
        .text
 
        /* int pthread_cond_broadcast (pthread_cond_t *cond) */
@@ -96,8 +92,24 @@ __pthread_cond_broadcast:
        bt/s    9f
         add    #cond_futex, r4
 
+       /* XXX: The kernel only supports FUTEX_CMP_REQUEUE to the same
+          type of futex (private resp. shared).  */
+       mov.l   @(MUTEX_KIND,r9), r0
+       tst     #(PI_BIT|PS_BIT), r0
+       bf      9f
+
        /* Wake up all threads.  */
-       mov     #FUTEX_CMP_REQUEUE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_CMP_REQUEUE, r0
+       or      r0, r5
+#endif
        mov     #1, r6
        mov     #-1, r7
        shlr    r7              /* r7 = 0x7fffffff */
@@ -154,10 +166,17 @@ __pthread_cond_broadcast:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait5, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait5, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait5b:
+.Lwait5b:
        bra     2b
         nop
 
@@ -167,10 +186,16 @@ __pthread_cond_broadcast:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake5, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lwake5, r1
        bsrf    r1
-        nop
-.Lmwake5b:
+        extu.b r5, r5
+.Lwake5b:
        bra     6b
         nop
 
@@ -180,15 +205,36 @@ __pthread_cond_broadcast:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake6, r1
+       mov     #-1, r0
+       cmp/eq  r0, r9
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lwake6, r1
        bsrf    r1
-        nop
-.Lmwake6b:
+        extu.b r5, r5
+.Lwake6b:
        bra     8b
         nop
 
 9:
-       mov     #FUTEX_WAKE, r5
+       mov     #-1, r0
+       cmp/eq  r0, r9
+       bt/s    99f
+        mov    #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
+99:
        mov     #-1, r6
        shlr    r6              /* r6 = 0x7fffffff */
        mov     #0, r7
@@ -199,12 +245,17 @@ __pthread_cond_broadcast:
        bra     10b
         nop
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+
        .align  2
-.Lmwait5:
-       .long   __lll_mutex_lock_wait-.Lmwait5b
-.Lmwake5:
-       .long   __lll_mutex_unlock_wake-.Lmwake5b
-.Lmwake6:
-       .long   __lll_mutex_unlock_wake-.Lmwake6b
+.Lwait5:
+       .long   __lll_lock_wait-.Lwait5b
+.Lwake5:
+       .long   __lll_unlock_wake-.Lwake5b
+.Lwake6:
+       .long   __lll_unlock_wake-.Lwake6b
        .size   __pthread_cond_broadcast, .-__pthread_cond_broadcast
 weak_alias (__pthread_cond_broadcast, pthread_cond_broadcast)
index d92f11c..914a1ba 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <bits/kernel-features.h>
+#include <pthread-errnos.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-#define FUTEX_REQUEUE          3
-
-#define EINVAL                 22
-
        .text
 
        /* int pthread_cond_signal (pthread_cond_t *cond) */
@@ -77,14 +73,63 @@ __pthread_cond_signal:
        /* Wake up one thread.  */
        mov     r8, r4
        add     #cond_futex, r4
-       mov     #FUTEX_WAKE, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAKE_OP, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE_OP, r0
+       or      r0, r5
+#endif
+99:
        mov     #1, r6
        mov     #0, r7
+       mov     r8, r0
+       add     #cond_lock, r0
+       mov.l   .Lfutexop, r1
        mov     #SYS_futex, r3
        extu.b  r3, r3
        trapa   #0x14
        SYSCALL_INST_PAD
 
+       /* For any kind of error, we try again with WAKE.
+          The general test also covers running on old kernels.  */
+       mov     r0, r1
+       mov     #-12, r2
+       shad    r2, r1
+       not     r1, r1
+       tst     r1, r1
+       bt      7f
+
+6:
+       mov     #0, r0
+       lds.l   @r15+, pr
+       rts
+        mov.l  @r15+, r8
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+       .align  2
+.Lfutexop:
+       .long   FUTEX_OP_CLEAR_WAKE_IF_GT_ONE
+
+7:
+       /* r5 should be either FUTEX_WAKE_OP or
+          FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG from the previous syscall.  */
+       mov     #(FUTEX_WAKE ^ FUTEX_WAKE_OP), r0
+       xor     r0, r5
+       trapa   #0x14
+       SYSCALL_INST_PAD
+
 4:
        /* Unlock.  */
 #if cond_lock != 0
@@ -93,12 +138,26 @@ __pthread_cond_signal:
        DEC (@r8, r2)
 #endif
        tst     r2, r2
-       bf      5f
-6:
-       mov     #0, r0
-       lds.l   @r15+, pr
-       rts
-        mov.l  @r15+, r8
+       bt      6b
+
+5:
+       /* Unlock in loop requires wakeup.  */
+       mov     r8, r4
+#if cond_lock != 0
+       add     #cond_lock, r4
+#endif
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:    
+       mov.l   .Lwake4, r1
+       bsrf    r1
+        extu.b r5, r5
+.Lwake4b:
+       bra     6b
+        nop
 
 1:
        /* Initial locking failed.  */
@@ -106,30 +165,24 @@ __pthread_cond_signal:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait4, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:    
+       extu.b  r6, r6
+       mov.l   .Lwait4, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait4b:
+.Lwait4b:
        bra     2b
         nop
 
-5:
-       /* Unlock in loop requires wakeup.  */
-       mov     r8, r4
-#if cond_lock != 0
-       add     #cond_lock, r4
-#endif
-       mov.l   .Lmwake4, r1
-       bsrf    r1
-        nop
-.Lmwake4b:
-       bra     6b
-        nop
-
        .align  2
-.Lmwait4:
-       .long   __lll_mutex_lock_wait-.Lmwait4b
-.Lmwake4:
-       .long   __lll_mutex_unlock_wake-.Lmwake4b
+.Lwait4:
+       .long   __lll_lock_wait-.Lwait4b
+.Lwake4:
+       .long   __lll_unlock_wake-.Lwake4b
        .size   __pthread_cond_signal, .-__pthread_cond_signal
 weak_alias (__pthread_cond_signal, pthread_cond_signal)
index 5812488..3e11756 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
 #include "lowlevel-atomic.h"
 
-#define SYS_gettimeofday       __NR_gettimeofday
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-
        .text
 
 /* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
@@ -119,7 +116,7 @@ __pthread_cond_timedwait:
        mov.l   @(cond_futex,r8), r0
        add     r2, r0
        mov.l   r0, @(cond_futex,r8)
-       mov     #(1 << clock_bits), r2
+       mov     #(1 << nwaiters_shift), r2
        mov.l   @(cond_nwaiters,r8), r0
        add     r2, r0
        mov.l   r0, @(cond_nwaiters,r8)
@@ -135,7 +132,7 @@ __pthread_cond_timedwait:
 #ifdef __NR_clock_gettime
        /* Get the clock number.         */
        mov.l   @(cond_nwaiters,r8), r4
-       mov     #((1 << clock_bits) - 1), r0
+       mov     #((1 << nwaiters_shift) - 1), r0
        and     r0, r4
        /* Only clocks 0 and 1 are allowed.  Both are handled in the
           kernel.  */
@@ -163,7 +160,7 @@ __pthread_cond_timedwait:
        mov     r15, r4
        add     #16, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -181,7 +178,7 @@ __pthread_cond_timedwait:
        mov     r15, r4
        add     #16, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -233,7 +230,22 @@ __pthread_cond_timedwait:
 
        mov     r15, r7
        add     #16, r7
-       mov     #FUTEX_WAIT, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+#endif
+99:
        mov.l   @(8,r15), r6
        mov     r8, r4
        add     #cond_futex, r4
@@ -322,7 +334,7 @@ __pthread_cond_timedwait:
        mov.l   r1,@(woken_seq+4,r8)
 
 24:
-       mov     #(1 << clock_bits), r2
+       mov     #(1 << nwaiters_shift), r2
        mov.l   @(cond_nwaiters,r8),r0
        sub     r2, r0
        mov.l   r0,@(cond_nwaiters,r8)
@@ -334,7 +346,7 @@ __pthread_cond_timedwait:
        not     r0, r0
        cmp/eq  #0, r0
        bf/s    25f
-        mov    #((1 << clock_bits) - 1), r1
+        mov    #((1 << nwaiters_shift) - 1), r1
        not     r1, r1
        mov.l   @(cond_nwaiters,r8),r0
        tst     r1, r0
@@ -342,7 +354,22 @@ __pthread_cond_timedwait:
 
        mov     r8, r4
        add     #cond_nwaiters, r4
-       mov     #FUTEX_WAKE, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
+99:
        mov     #1, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -382,6 +409,10 @@ __pthread_cond_timedwait:
        rts
         mov.l  @r15+, r8
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
 .L1k:
        .word   1000
        .align  2
@@ -402,10 +433,17 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait2, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait2, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait2b:
+.Lwait2b:
        bra     2b
         nop
 
@@ -415,10 +453,16 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake2, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lmwait2, r1
        bsrf    r1
-        nop
-.Lmwake2b:
+        extu.b r5, r5
+.Lmwait2b:
        bra     4b
         nop
 
@@ -428,10 +472,17 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait3, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait3, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait3b:
+.Lwait3b:
        bra     6b
         nop
 
@@ -441,10 +492,16 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake3, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lmwait3, r1
        bsrf    r1
-        nop
-.Lmwake3b:
+        extu.b r5, r5
+.Lmwait3b:
        bra     11b
         nop
 
@@ -463,25 +520,31 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake4, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lmwait4, r1
        bsrf    r1
-        nop
-.Lmwake4b:
+        extu.b r5, r5
+.Lmwait4b:
 17:
        bra     18b
         mov.l  @(24,r15), r0
 
        .align  2
+.Lwait2:
+       .long   __lll_lock_wait-.Lwait2b
 .Lmwait2:
-       .long   __lll_mutex_lock_wait-.Lmwait2b
-.Lmwake2:
-       .long   __lll_mutex_unlock_wake-.Lmwake2b
+       .long   __lll_unlock_wake-.Lmwait2b
+.Lwait3:
+       .long   __lll_lock_wait-.Lwait3b
 .Lmwait3:
-       .long   __lll_mutex_lock_wait-.Lmwait3b
-.Lmwake3:
-       .long   __lll_mutex_unlock_wake-.Lmwake3b
-.Lmwake4:
-       .long   __lll_mutex_unlock_wake-.Lmwake4b
+       .long   __lll_unlock_wake-.Lmwait3b
+.Lmwait4:
+       .long   __lll_unlock_wake-.Lmwait4b
        .size   __pthread_cond_timedwait, .-__pthread_cond_timedwait
 weak_alias (__pthread_cond_timedwait, pthread_cond_timedwait)
 
@@ -505,10 +568,17 @@ __condvar_tw_cleanup:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait5, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait5, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait5b:
+.Lwait5b:
 
 1:
        mov.l   @(broadcast_seq,r8), r0
@@ -519,6 +589,21 @@ __condvar_tw_cleanup:
        mov     #1, r2
        mov     #0, r3
 
+       /* We increment the wakeup_seq counter only if it is lower than
+          total_seq.  If this is not the case the thread was woken and
+          then canceled.  In this case we ignore the signal.  */
+       mov.l   @(total_seq+4,r8), r0
+       mov.l   @(wakeup_seq+4,r8), r1
+       cmp/hi  r1, r0
+       bt/s    6f
+        cmp/hi r0, r1
+       bt      7f
+       mov.l   @(total_seq,r8), r0
+       mov.l   @(wakeup_seq,r8), r1
+       cmp/hs  r0, r1
+       bt      7f
+
+6:
        clrt
        mov.l   @(wakeup_seq,r8),r0
        mov.l   @(wakeup_seq+4,r8),r1
@@ -530,6 +615,7 @@ __condvar_tw_cleanup:
        add     r2, r0
        mov.l   r0,@(cond_futex,r8)
 
+7:
        clrt
        mov.l   @(woken_seq,r8),r0
        mov.l   @(woken_seq+4,r8),r1
@@ -539,7 +625,7 @@ __condvar_tw_cleanup:
        mov.l   r1,@(woken_seq+4,r8)
 
 3:
-       mov     #(1 << clock_bits), r2
+       mov     #(1 << nwaiters_shift), r2
        mov.l   @(cond_nwaiters,r8),r0
        sub     r2, r0
        mov.l   r0,@(cond_nwaiters,r8)
@@ -552,7 +638,7 @@ __condvar_tw_cleanup:
        not     r0, r0
        cmp/eq  #0, r0
        bf/s    4f
-        mov    #((1 << clock_bits) - 1), r1
+        mov    #((1 << nwaiters_shift) - 1), r1
        not     r1, r1
        mov.l   @(cond_nwaiters,r8),r0
        tst     r1, r0
@@ -582,10 +668,16 @@ __condvar_tw_cleanup:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake5, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lmwait5, r1
        bsrf    r1
-        nop
-.Lmwake5b:
+        extu.b r5, r5
+.Lmwait5b:
 
 2:
        /* Wake up all waiters to make sure no signal gets lost.  */
@@ -618,10 +710,10 @@ __condvar_tw_cleanup:
        sleep
 
        .align  2
+.Lwait5:
+       .long   __lll_lock_wait-.Lwait5b
 .Lmwait5:
-       .long   __lll_mutex_lock_wait-.Lmwait5b
-.Lmwake5:
-        .long   __lll_mutex_unlock_wake-.Lmwake5b
+        .long   __lll_unlock_wake-.Lmwait5b
 .Lmlocki5:
        .long   __pthread_mutex_cond_lock-.Lmlocki5b
 .Lresume:
index c7df9bf..5a897f6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-
        .text
 
 /* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
@@ -105,7 +104,7 @@ __pthread_cond_wait:
        mov.l   @(cond_futex,r8),r0
        add     r2, r0
        mov.l   r0,@(cond_futex,r8)
-       mov     #(1 << clock_bits), r2
+       mov     #(1 << nwaiters_shift), r2
        mov.l   @(cond_nwaiters,r8), r0
        add     r2, r0
        mov.l   r0, @(cond_nwaiters,r8)
@@ -137,7 +136,22 @@ __pthread_cond_wait:
        mov.l   r0, @r15
 
        mov     #0, r7
-       mov     #FUTEX_WAIT, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff0, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+#endif
+99:
        mov.l   @(8,r15), r6
        mov     r8, r4
        add     #cond_futex, r4
@@ -195,7 +209,7 @@ __pthread_cond_wait:
        mov.l   r1,@(woken_seq+4,r8)
 
 16:
-       mov     #(1 << clock_bits), r2
+       mov     #(1 << nwaiters_shift), r2
        mov.l   @(cond_nwaiters,r8),r0
        sub     r2, r0
        mov.l   r0,@(cond_nwaiters,r8)
@@ -207,7 +221,7 @@ __pthread_cond_wait:
        not     r0, r0
        cmp/eq  #0, r0
        bf/s    17f
-        mov    #((1 << clock_bits) - 1), r1
+        mov    #((1 << nwaiters_shift) - 1), r1
        not     r1, r1
        mov.l   @(cond_nwaiters,r8),r0
        tst     r1, r0
@@ -215,7 +229,22 @@ __pthread_cond_wait:
 
        mov     r8, r4
        add     #cond_nwaiters, r4
-       mov     #FUTEX_WAKE, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff0, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
+99:
        mov     #1, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -249,6 +278,10 @@ __pthread_cond_wait:
        rts
         mov.l  @r15+, r8
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff0:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
        .align  2
 .Lmunlock0:
        .long   __pthread_mutex_unlock_usercnt-.Lmunlock0b
@@ -265,10 +298,17 @@ __pthread_cond_wait:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait0, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait0, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait0b:
+.Lwait0b:
        bra     2b
         nop
 3:
@@ -277,10 +317,16 @@ __pthread_cond_wait:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake0, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lwake0, r1
        bsrf    r1
-        nop
-.Lmwake0b:
+        extu.b r5, r5
+.Lwake0b:
        bra     4b
         nop
 
@@ -290,10 +336,17 @@ __pthread_cond_wait:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait1, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait1, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait1b:
+.Lwait1b:
        bra     6b
         nop
 
@@ -303,10 +356,16 @@ __pthread_cond_wait:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake1, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lwake1, r1
        bsrf    r1
-        nop
-.Lmwake1b:
+        extu.b r5, r5
+.Lwake1b:
        bra     11b
         nop
 
@@ -325,26 +384,32 @@ __pthread_cond_wait:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake2, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lwake2, r1
        bsrf    r1
-        nop
-.Lmwake2b:
+        extu.b r5, r5
+.Lwake2b:
 
 13:
        bra     14b
         mov.l  @(12,r15), r0
 
        .align  2
-.Lmwait0:
-       .long   __lll_mutex_lock_wait-.Lmwait0b
-.Lmwake0:
-       .long   __lll_mutex_unlock_wake-.Lmwake0b
-.Lmwait1:
-       .long   __lll_mutex_lock_wait-.Lmwait1b
-.Lmwake1:
-       .long   __lll_mutex_unlock_wake-.Lmwake1b
-.Lmwake2:
-       .long   __lll_mutex_unlock_wake-.Lmwake2b
+.Lwait0:
+       .long   __lll_lock_wait-.Lwait0b
+.Lwake0:
+       .long   __lll_unlock_wake-.Lwake0b
+.Lwait1:
+       .long   __lll_lock_wait-.Lwait1b
+.Lwake1:
+       .long   __lll_unlock_wake-.Lwake1b
+.Lwake2:
+       .long   __lll_unlock_wake-.Lwake2b
        .size   __pthread_cond_wait, .-__pthread_cond_wait
 weak_alias (__pthread_cond_wait, pthread_cond_wait)
 
@@ -368,10 +433,17 @@ __condvar_w_cleanup:
 #if cond_lock != 0
        add     #cond_lock, r5
 #endif
-       mov.l   .Lmwait3, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r6
+       mov     #LLL_SHARED, r6
+99:
+       extu.b  r6, r6
+       mov.l   .Lwait3, r1
        bsrf    r1
         mov    r2, r4
-.Lmwait3b:
+.Lwait3b:
 
 1:
        mov.l   @(broadcast_seq,r8), r0
@@ -382,6 +454,21 @@ __condvar_w_cleanup:
        mov     #1, r2
        mov     #0, r3
 
+       /* We increment the wakeup_seq counter only if it is lower than
+          total_seq.  If this is not the case the thread was woken and
+          then canceled.  In this case we ignore the signal.  */
+       mov.l   @(total_seq+4,r8), r0
+       mov.l   @(wakeup_seq+4,r8), r1
+       cmp/hi  r1, r0
+       bt/s    6f
+        cmp/hi r0, r1
+       bt      7f
+       mov.l   @(total_seq,r8), r0
+       mov.l   @(wakeup_seq,r8), r1
+       cmp/hs  r0, r1
+       bt      7f
+
+6:
        clrt
        mov.l   @(wakeup_seq,r8),r0
        mov.l   @(wakeup_seq+4,r8),r1
@@ -393,6 +480,7 @@ __condvar_w_cleanup:
        add     r2, r0
        mov.l   r0,@(cond_futex,r8)
 
+7:
        clrt
        mov.l   @(woken_seq,r8),r0
        mov.l   @(woken_seq+4,r8),r1
@@ -402,7 +490,7 @@ __condvar_w_cleanup:
        mov.l   r1,@(woken_seq+4,r8)
 
 3:
-       mov     #(1 << clock_bits), r2
+       mov     #(1 << nwaiters_shift), r2
        mov.l   @(cond_nwaiters,r8),r0
        sub     r2, r0
        mov.l   r0,@(cond_nwaiters,r8)
@@ -415,7 +503,7 @@ __condvar_w_cleanup:
        not     r0, r0
        cmp/eq  #0, r0
        bf/s    4f
-        mov    #((1 << clock_bits) - 1), r1
+        mov    #((1 << nwaiters_shift) - 1), r1
        not     r1, r1
        mov.l   @(cond_nwaiters,r8),r0
        tst     r1, r0
@@ -423,7 +511,22 @@ __condvar_w_cleanup:
 
        mov     r8, r4
        add     #cond_nwaiters, r4
-       mov     #FUTEX_WAKE, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff1, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
+99:
        mov     #1, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -445,10 +548,16 @@ __condvar_w_cleanup:
 #if cond_lock != 0
        add     #cond_lock, r4
 #endif
-       mov.l   .Lmwake3, r1
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bf/s    99f
+        mov    #LLL_PRIVATE, r5
+       mov     #LLL_SHARED, r5
+99:
+       mov.l   .Lwake3, r1
        bsrf    r1
-        nop
-.Lmwake3b:
+        extu.b r5, r5
+.Lwake3b:
 
 2:
        /* Wake up all waiters to make sure no signal gets lost.  */
@@ -456,7 +565,22 @@ __condvar_w_cleanup:
        bf/s    5f
         mov    r8, r4
        add     #cond_futex, r4
-       mov     #FUTEX_WAKE, r5
+       mov.l   @(dep_mutex,r8), r0
+       cmp/eq  #-1, r0
+       bt/s    99f
+        mov    #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff1, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
+99:
        mov     #-1, r6
        shlr    r6              /* r6 = 0x7fffffff */
        mov     #0, r7
@@ -480,11 +604,15 @@ __condvar_w_cleanup:
         mov    r11, r4
        sleep
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff1:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
        .align  2
-.Lmwait3:
-       .long   __lll_mutex_lock_wait-.Lmwait3b
-.Lmwake3:
-        .long   __lll_mutex_unlock_wake-.Lmwake3b
+.Lwait3:
+       .long   __lll_lock_wait-.Lwait3b
+.Lwake3:
+        .long   __lll_unlock_wake-.Lwake3b
 .Lmlocki3:
        .long   __pthread_mutex_cond_lock-.Lmlocki3b
 .Lresume:
index de83dec..caebb93 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 
 #include <unwindbuf.h>
 #include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT     0
-#define FUTEX_WAKE     1
 
        .comm   __fork_generation, 4, 4
 
@@ -94,7 +94,19 @@ __pthread_once:
        bf      3f      /* Different for generation -> run initializer.  */
 
        /* Somebody else got here first.  Wait.  */
-       mov     #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+# if FUTEX_WAIT != 0
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+# endif
+#endif
        mov     r3, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -156,7 +168,17 @@ __pthread_once:
        INC (@r9, r2)
        /* Wake up all other threads.  */
        mov     r9, r4
-       mov     #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r5
+       extu.b  r5, r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
        mov     #-1, r6
        shlr    r6              /* r6 = 0x7fffffff */
        mov     #0, r7
@@ -191,7 +213,17 @@ __pthread_once:
        mov     #0, r7
        mov.l   r7, @r9
        mov     r9, r4
-       mov     #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r5
+#else
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+#endif
+       extu.b  r5, r5
        mov     #-1, r6
        shlr    r6              /* r6 = 0x7fffffff */
        mov     #SYS_futex, r3
@@ -207,6 +239,10 @@ __pthread_once:
        sleep
        cfi_endproc
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
        .align  2
 .Lsigsetjmp:
        .long   __sigsetjmp@PLT-(.Lsigsetjmp0-.)
@@ -223,23 +259,3 @@ __pthread_once_internal = __pthread_once
 
        .globl  pthread_once
 pthread_once = __pthread_once
-
-
-       .type   clear_once_control,@function
-       .align  5
-clear_once_control:
-       mov     #0, r0
-       mov.l   r0, @r4
-
-       mov     #FUTEX_WAKE, r5
-       mov     #-1, r6
-       shlr    r6              /* r6 = 0x7fffffff */
-       mov     #0, r7
-       mov     #SYS_futex, r3
-       extu.b  r3, r3
-       trapa   #0x14
-       SYSCALL_INST_PAD
-
-       rts
-        nop
-       .size   clear_once_control,.-clear_once_control
index ce1ab37..52fe5de 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 
        .text
 
@@ -54,7 +53,8 @@ __pthread_rwlock_rdlock:
        mov.l   @(WRITERS_QUEUED,r8), r0
        tst     r0, r0
        bt      5f
-       mov.l   @(FLAGS,r8), r0
+       mov     #FLAGS, r0
+       mov.b   @(r0,r8), r0
        tst     r0, r0
        bt      5f
 3:
@@ -74,9 +74,28 @@ __pthread_rwlock_rdlock:
        tst     r2, r2
        bf      10f
 11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+       xor     r0, r5
+       extu.b  r5, r5
+#else
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
+# if FUTEX_WAIT != 0
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+# endif
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r0
+       xor     r0, r5
+#endif
        mov     r8, r4
        add     #READERS_WAKEUP, r4
-       mov     #FUTEX_WAIT, r5
        mov     r9, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -123,15 +142,22 @@ __pthread_rwlock_rdlock:
        rts
         mov    r3, r0
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+
 1:
        mov     r8, r5
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait0, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait0b:
        bra     2b
         nop
@@ -154,6 +180,9 @@ __pthread_rwlock_rdlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake0, r1
        bsrf    r1
         nop
@@ -182,6 +211,9 @@ __pthread_rwlock_rdlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake1, r1
        bsrf    r1
         nop
@@ -194,23 +226,25 @@ __pthread_rwlock_rdlock:
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait1, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait1b:
        bra     13b
         nop
 
        .align  2
 .Lwait0:
-       .long   __lll_mutex_lock_wait-.Lwait0b
+       .long   __lll_lock_wait-.Lwait0b
 .Lwake0:
-       .long   __lll_mutex_unlock_wake-.Lwake0b
+       .long   __lll_unlock_wake-.Lwake0b
 .Lwait1:
-       .long   __lll_mutex_lock_wait-.Lwait1b
+       .long   __lll_lock_wait-.Lwait1b
 .Lwake1:
-       .long   __lll_mutex_unlock_wake-.Lwake1b
+       .long   __lll_unlock_wake-.Lwake1b
        .size   __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
 
        .globl  pthread_rwlock_rdlock
index 8a4e7d3..6e7af21 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 #include "lowlevel-atomic.h"
 
-#define SYS_gettimeofday       __NR_gettimeofday
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 
        .text
 
@@ -58,7 +56,8 @@ pthread_rwlock_timedrdlock:
        mov.l   @(WRITERS_QUEUED,r8), r0
        tst     r0, r0
        bt      5f
-       mov.l   @(FLAGS,r8), r0
+       mov     #FLAGS, r0
+       mov.b   @(r0,r8), r0
        tst     r0, r0
        bt      5f
 3:
@@ -88,7 +87,7 @@ pthread_rwlock_timedrdlock:
        /* Get current time.  */
        mov     r15, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -116,7 +115,26 @@ pthread_rwlock_timedrdlock:
 
        /* Futex call.  */
        mov     r15, r7
-       mov     #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+       xor     r0, r5
+       extu.b  r5, r5
+#else
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
+# if FUTEX_WAIT != 0
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+# endif
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r0
+       xor     r0, r5
+#endif
        mov     r10, r6
        mov     r8, r4
        add     #READERS_WAKEUP, r4
@@ -175,6 +193,10 @@ pthread_rwlock_timedrdlock:
        rts
         mov    r3, r0
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
        .align  2
 .L1k0:
        .long   1000
@@ -186,10 +208,12 @@ pthread_rwlock_timedrdlock:
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait2, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait2b:
        bra     2b
         nop
@@ -208,16 +232,20 @@ pthread_rwlock_timedrdlock:
        .word   TID - TLS_PRE_TCB_SIZE
 
 6:
+       mov     r3, r10
        mov     r8, r4
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake2, r1
        bsrf    r1
         nop
 .Lwake2b:
        bra     7b
-        mov    #0, r3
+        mov    r10, r3
 
 8:
        /* Overflow.  */
@@ -240,6 +268,9 @@ pthread_rwlock_timedrdlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake3, r1
        bsrf    r1
         nop
@@ -248,17 +279,20 @@ pthread_rwlock_timedrdlock:
         nop
 
 12:
+       mov     r3, r10
        mov     r8, r5
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait3, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait3b:
        bra     13b
-        nop
+        mov    r10, r3
 
 16:
        bra     17b
@@ -270,11 +304,11 @@ pthread_rwlock_timedrdlock:
 
        .align  2
 .Lwait2:
-       .long   __lll_mutex_lock_wait-.Lwait2b
+       .long   __lll_lock_wait-.Lwait2b
 .Lwake2:
-       .long   __lll_mutex_unlock_wake-.Lwake2b
+       .long   __lll_unlock_wake-.Lwake2b
 .Lwait3:
-       .long   __lll_mutex_lock_wait-.Lwait3b
+       .long   __lll_lock_wait-.Lwait3b
 .Lwake3:
-       .long   __lll_mutex_unlock_wake-.Lwake3b
+       .long   __lll_unlock_wake-.Lwake3b
        .size   pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
index 6284140..1cb7cbd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 #include "lowlevel-atomic.h"
 
-#define SYS_gettimeofday       __NR_gettimeofday
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 
        .text
 
@@ -85,7 +83,7 @@ pthread_rwlock_timedwrlock:
        /* Get current time.  */
        mov     r15, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -113,7 +111,26 @@ pthread_rwlock_timedwrlock:
 
        /* Futex call.  */
        mov     r15, r7
-       mov     #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+       xor     r0, r5
+       extu.b  r5, r5
+#else
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
+# if FUTEX_WAIT != 0
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+# endif
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r0
+       xor     r0, r5
+#endif
        mov     r10, r6
        mov     r8, r4
        add     #WRITERS_WAKEUP, r4
@@ -174,6 +191,10 @@ pthread_rwlock_timedwrlock:
        rts
         mov    r3, r0
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
 .L1k1:
        .word   1000
        .align  2
@@ -185,10 +206,12 @@ pthread_rwlock_timedwrlock:
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait6, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait6b:
        bra     2b
         nop
@@ -202,16 +225,20 @@ pthread_rwlock_timedwrlock:
        bra     9b
         mov    #EDEADLK, r3
 6:
+       mov     r3, r10
        mov     r8, r4
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake6, r1
        bsrf    r1
         nop
 .Lwake6b:
        bra     7b
-        mov    #0, r3
+        mov    r10, r3
 
 .Ltidoff:
        .word   TID - TLS_PRE_TCB_SIZE
@@ -229,6 +256,9 @@ pthread_rwlock_timedwrlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake7, r1
        bsrf    r1
         nop
@@ -237,17 +267,20 @@ pthread_rwlock_timedwrlock:
         nop
 
 12:
+       mov     r3, r10
        mov     r8, r5
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait7, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait7b:
        bra     13b
-        nop
+        mov    r10, r3
 
 16:
        bra     17b
@@ -255,11 +288,11 @@ pthread_rwlock_timedwrlock:
 
        .align  2
 .Lwait6:
-       .long   __lll_mutex_lock_wait-.Lwait6b
+       .long   __lll_lock_wait-.Lwait6b
 .Lwake6:
-       .long   __lll_mutex_unlock_wake-.Lwake6b
+       .long   __lll_unlock_wake-.Lwake6b
 .Lwait7:
-       .long   __lll_mutex_lock_wait-.Lwait7b
+       .long   __lll_lock_wait-.Lwait7b
 .Lwake7:
-       .long   __lll_mutex_unlock_wake-.Lwake7b
+       .long   __lll_unlock_wake-.Lwake7b
        .size   pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
index 74f32f8..239090b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
+#include <bits/kernel-features.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 
        .text
 
@@ -86,7 +85,24 @@ __pthread_rwlock_unlock:
        bf      7f
 
 8:
-       mov     #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r0
+       xor     r0, r5
+       extu.b  r5, r5
+#else
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
+       mov     #FUTEX_WAKE, r0
+       or      r0, r5
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r0
+       xor     r0, r5
+#endif
        mov     #SYS_futex, r3
        mov     #0, r7
        extu.b  r3, r3
@@ -118,10 +134,12 @@ __pthread_rwlock_unlock:
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait8, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait8b:
        bra     2b
         nop
@@ -130,6 +148,9 @@ __pthread_rwlock_unlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake8, r1
        bsrf    r1
         nop
@@ -144,6 +165,9 @@ __pthread_rwlock_unlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake9, r1
        bsrf    r1
         nop
@@ -153,13 +177,17 @@ __pthread_rwlock_unlock:
        bra     8b
         mov.l  @r15+, r4
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
        .align  2
 .Lwait8:
-       .long   __lll_mutex_lock_wait-.Lwait8b
+       .long   __lll_lock_wait-.Lwait8b
 .Lwake8:
-       .long   __lll_mutex_unlock_wake-.Lwake8b
+       .long   __lll_unlock_wake-.Lwake8b
 .Lwake9:
-       .long   __lll_mutex_unlock_wake-.Lwake9b
+       .long   __lll_unlock_wake-.Lwake9b
        .size   __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
 
        .globl  pthread_rwlock_unlock
index d071f7f..3d37fb4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 #include "lowlevel-atomic.h"
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 
        .text
 
@@ -73,7 +72,26 @@ __pthread_rwlock_wrlock:
 11:
        mov     r8, r4
        add     #WRITERS_WAKEUP, r4
-       mov     #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       mov     #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+       xor     r0, r5
+       extu.b  r5, r5
+#else
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
+# if FUTEX_WAIT != 0
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+# endif
+       stc     gbr, r1
+       mov.w   .Lpfoff, r2
+       add     r2, r1
+       mov.l   @r1, r0
+       xor     r0, r5
+#endif
        mov     r9, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -123,10 +141,12 @@ __pthread_rwlock_wrlock:
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait4, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait4b:
        bra     2b
         nop
@@ -144,6 +164,9 @@ __pthread_rwlock_wrlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake4, r1
        bsrf    r1
         nop
@@ -151,6 +174,10 @@ __pthread_rwlock_wrlock:
        bra     7b
         mov    #0, r3
 
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+       .word   PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
 .Ltidoff:
        .word   TID - TLS_PRE_TCB_SIZE
 
@@ -166,6 +193,9 @@ __pthread_rwlock_wrlock:
 #if MUTEX != 0
        add     #MUTEX, r4
 #endif
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r5
+       extu.b  r5, r5
        mov.l   .Lwake5, r1
        bsrf    r1
         nop
@@ -178,23 +208,25 @@ __pthread_rwlock_wrlock:
 #if MUTEX != 0
        add     #MUTEX, r5
 #endif
-       mov     r2, r4
+       mov     #PSHARED, r0
+       mov.b   @(r0,r8), r6
+       extu.b  r6, r6
        mov.l   .Lwait5, r1
        bsrf    r1
-        nop
+        mov    r2, r4
 .Lwait5b:
        bra     13b
         nop
 
        .align  2
 .Lwait4:
-       .long   __lll_mutex_lock_wait-.Lwait4b
+       .long   __lll_lock_wait-.Lwait4b
 .Lwake4:
-       .long   __lll_mutex_unlock_wake-.Lwake4b
+       .long   __lll_unlock_wake-.Lwake4b
 .Lwait5:
-       .long   __lll_mutex_lock_wait-.Lwait5b
+       .long   __lll_lock_wait-.Lwait5b
 .Lwake5:
-       .long   __lll_mutex_unlock_wake-.Lwake5b
+       .long   __lll_unlock_wake-.Lwake5b
        .globl  pthread_rwlock_wrlock
 pthread_rwlock_wrlock = __pthread_rwlock_wrlock
 
index 9755b7e..f71cd93 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <structsem.h>
+#include <lowlevellock.h>
 #include "lowlevel-atomic.h"
 
 
-#define SYS_gettimeofday       __NR_gettimeofday
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
        .text
 
        .globl  __new_sem_post
        .type   __new_sem_post,@function
        .align  5
 __new_sem_post:
-       INC     (@r4, r6)
-
+       mov.l   @(VALUE,r4), r2
+0:
+       mov.l   .Lmax, r1
+       cmp/eq  r1, r2
+       bt/s    3f
+        mov    r2, r3
+       mov     r3, r5
+       add     #1, r5
+       CMPXCHG (r3, @(VALUE,r4), r5, r2)
+       bf      0b
+       mov.l   @(NWAITERS,r4), r2
+       tst     r2, r2
+       bt      2f
        mov     #FUTEX_WAKE, r5
+       mov.l   @(PRIVATE,r4), r1
+       or      r1, r5
+       mov     #1, r6
        mov     #0, r7
        mov     #SYS_futex, r3
        extu.b  r3, r3
@@ -42,11 +54,20 @@ __new_sem_post:
 
        cmp/pz  r0
        bf      1f
+2:
        rts
         mov    #0, r0
 
 1:
-       mov     #EINVAL, r2
+       bra     4f
+        mov    #EINVAL, r2
+
+3:
+       mov     #EOVERFLOW, r2
+4:
+       mov.l   r12, @-r15
+       mov.l   r8, @-r15
+       sts.l   pr, @-r15
        mova    .Lgot3, r0
        mov.l   .Lgot3, r12
        add     r0, r12
@@ -55,25 +76,30 @@ __new_sem_post:
        mov.l   .Lerrno3, r0
        stc     gbr, r1
        mov.l   @(r0, r12), r0
-       bra .Lexit
-       add     r1, r0
-       .align 2
+       bra     .Lexit
+        add    r1, r0
+       .align  2
 .Lerrno3:
        .long   errno@GOTTPOFF
 .Lexit:
+       mov.l   r2, @r0
 #else
+       mov     r2, r8
        mov.l   .Lerrloc3, r1
        bsrf    r1
         nop
 .Lerrloc3b:
+       mov     r8, @r0
 #endif
-       mov.l   r2, @r0
        lds.l   @r15+, pr
+       mov.l   @r15+, r8
        mov.l   @r15+, r12
        rts
         mov    #-1, r0
 
        .align  2
+.Lmax:
+       .long   SEM_VALUE_MAX
 .Lgot3:
        .long   _GLOBAL_OFFSET_TABLE_
 #if !USE___THREAD
index 40782fc..774442f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #include <sysdep.h>
 #include <pthread-errnos.h>
 #include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
 #include "lowlevel-atomic.h"
 
 
-#define SYS_gettimeofday       __NR_gettimeofday
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
 
        .text
 
        .globl  sem_timedwait
        .type   sem_timedwait,@function
        .align  5
-       cfi_startproc
 sem_timedwait:
-       /* First check for cancellation.  */
-       stc     gbr, r0
-       mov.w   .Lchand, r1
-       mov.l   @(r0,r1), r0
-       mov     #0xf9, r1
-       and     r1, r0
-       cmp/eq  #8, r0
-       bf      0f
-       bra     10f
-        stc    gbr, r0
-0:
+.LSTARTCODE:
        mov.l   @r4, r0
 2:
        tst     r0, r0
@@ -60,22 +51,17 @@ sem_timedwait:
 1:
        /* Check whether the timeout value is valid.  */
        mov.l   r8, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r8, 0)
+.Lpush_r8:
        mov.l   r9, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r9, 0)
+.Lpush_r9:
        mov.l   r10, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r10, 0)
+.Lpush_r10:
        mov.l   r12, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r12, 0)
+.Lpush_r12:
        sts.l   pr, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (pr, 0)
+.Lpush_pr:
        add     #-8, r15
-       cfi_adjust_cfa_offset(8)
+.Lalloc:
        mov     r4, r8
        mov     r5, r9
 
@@ -85,17 +71,13 @@ sem_timedwait:
        cmp/hs  r1, r0
        bt/s    6f
         mov    #EINVAL, r0
-7:
-       mov.l   .Lenable0, r1
-       bsrf    r1
-        nop
-.Lenable0b:
-       mov     r0, r10
+       INC (@(NWAITERS,r8),r2)
 
+7:
        /* Compute relative timeout.  */
        mov     r15, r4
        mov     #0, r5
-       mov     #SYS_gettimeofday, r3
+       mov     #__NR_gettimeofday, r3
        trapa   #0x12
        SYSCALL_INST_PAD
 
@@ -116,15 +98,27 @@ sem_timedwait:
 5:
        cmp/pz  r2
        bf/s    6f              /* Time is already up.  */
-       mov     #ETIMEDOUT, r0
+        mov    #ETIMEDOUT, r0
 
        /* Store relative timeout.  */
        mov.l   r2, @r15
        mov.l   r3, @(4,r15)
 
-       /* Futex call.  */
+.LcleanupSTART:
+       mov.l   .Lenable0, r1
+       bsrf    r1
+        nop
+.Lenable0b:
+       mov     r0, r10
+
        mov     r8, r4
-       mov     #FUTEX_WAIT, r5
+#if FUTEX_WAIT == 0
+       mov.l   @(PRIVATE,r8), r5
+#else
+       mov.l   @(PRIVATE,r8), r5
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+#endif
        mov     #0, r6
        mov     r15, r7
        mov     #SYS_futex, r3
@@ -138,6 +132,7 @@ sem_timedwait:
         mov    r0, r10
 .Ldisable0b:
        mov     r10, r0
+.LcleanupEND:
 
        tst     r0, r0
        bt      9f
@@ -156,6 +151,10 @@ sem_timedwait:
        bf/s    8b
         mov    r2, r0
 
+       DEC (@(NWAITERS,r8), r2)
+       mov     #0, r0
+
+10:
        add     #8, r15
        lds.l   @r15+, pr
        mov.l   @r15+, r12
@@ -163,12 +162,12 @@ sem_timedwait:
        mov.l   @r15+, r9
        mov.l   @r15+, r8
        rts
-        mov    #0, r0
+        nop
 
 3:
        neg     r0, r0
 6:
-       mov     r0, r8
+       mov     r0, r10
        mova    .Lgot2, r0
        mov.l   .Lgot2, r12
        add     r0, r12
@@ -177,11 +176,11 @@ sem_timedwait:
        mov.l   .Lerrno2, r0
        stc     gbr, r1
        mov.l   @(r0, r12), r0
-       bra .Lexit
-       add     r1, r0
-       .align 2
+       bra     .Lexit
+        add    r1, r0
+       .align  2
 .Lerrno2:
-       .long   errno@GOTTPOFF
+       .long   errno@GOTTPOFF
 .Lexit:
 #else
        mov.l   .Lerrloc2, r1
@@ -189,39 +188,13 @@ sem_timedwait:
         nop
 .Lerrloc2b:
 #endif
-       mov.l   r8, @r0
-       add     #8, r15
-       lds.l   @r15+, pr
-       mov.l   @r15+, r12
-       mov.l   @r15+, r10
-       mov.l   @r15+, r9
-       mov.l   @r15+, r8
-       rts
+       mov.l   r10, @r0
+       DEC (@(NWAITERS,r8), r2)
+       bra     10b
         mov    #-1, r0
 
-10:
-       /* Canceled.  */
-       mov.w   .Lresult, r1
-       mov     #-1, r2
-       mov.l   r2, @(r0,r1)
-       mov.w   .Lchand, r0
-       or.b    #0x10, @(r0,gbr)
-       stc     gbr, r0
-       mov.w   .Lclbuf, r1
-       mov.l   .Lunwind, r2
-       braf    r2
-        mov.l  @(r0,r1), r4
-.Lunwindb:
-       cfi_endproc
-
 .L1k:
        .word   1000
-.Lchand:
-       .word   CANCELHANDLING - TLS_PRE_TCB_SIZE
-.Lresult:
-       .word   RESULT - TLS_PRE_TCB_SIZE
-.Lclbuf:
-       .word   CLEANUP_JMP_BUF - TLS_PRE_TCB_SIZE
        .align  2
 .L1g:
        .long   1000000000
@@ -235,6 +208,151 @@ sem_timedwait:
        .long   __pthread_enable_asynccancel-.Lenable0b
 .Ldisable0:
        .long   __pthread_disable_asynccancel-.Ldisable0b
-.Lunwind:
-       .long   HIDDEN_JUMPTARGET (__pthread_unwind)-.Lunwindb
        .size   sem_timedwait,.-sem_timedwait
+
+       .type   sem_wait_cleanup,@function
+sem_wait_cleanup:
+       DEC (@(NWAITERS,r8), r2)
+.LcallUR:
+       mov.l   .Lresume, r1
+#ifdef PIC
+       add     r12, r1
+#endif
+       jsr     @r1
+        nop
+       sleep
+
+       .align  2
+.Lresume:
+#ifdef PIC
+       .long   _Unwind_Resume@GOTOFF
+#else
+       .long   _Unwind_Resume
+#endif
+.LENDCODE:
+       .size   sem_wait_cleanup,.-sem_wait_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   0xff                            ! @LPStart format (omit)
+       .byte   0xff                            ! @TType format (omit)
+       .byte   0x01                            ! call-site format
+                                               ! DW_EH_PE_uleb128
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 sem_wait_cleanup-.LSTARTCODE
+       .uleb128  0
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+       .ualong .LENDCIE-.LSTARTCIE             ! Length of the CIE.
+.LSTARTCIE:
+       .ualong 0                               ! CIE ID.
+       .byte   1                               ! Version number.
+#ifdef SHARED
+       .string "zPLR"                          ! NUL-terminated augmentation
+                                               ! string.
+#else
+       .string "zPL"                           ! NUL-terminated augmentation
+                                               ! string.
+#endif
+       .uleb128 1                              ! Code alignment factor.
+       .sleb128 -4                             ! Data alignment factor.
+       .byte   0x11                            ! Return address register
+                                               ! column.
+#ifdef SHARED
+       .uleb128 7                              ! Augmentation value length.
+       .byte   0x9b                            ! Personality: DW_EH_PE_pcrel
+                                               ! + DW_EH_PE_sdata4
+                                               ! + DW_EH_PE_indirect
+       .ualong DW.ref.__gcc_personality_v0-.
+       .byte   0x1b                            ! LSDA Encoding: DW_EH_PE_pcrel
+                                               ! + DW_EH_PE_sdata4.
+       .byte   0x1b                            ! FDE Encoding: DW_EH_PE_pcrel
+                                               ! + DW_EH_PE_sdata4.
+#else
+       .uleb128 6                              ! Augmentation value length.
+       .byte   0x0                             ! Personality: absolute
+       .ualong __gcc_personality_v0
+       .byte   0x0                             ! LSDA Encoding: absolute
+#endif
+       .byte 0x0c                              ! DW_CFA_def_cfa
+       .uleb128 0xf
+       .uleb128 0
+       .align 4
+.LENDCIE:
+
+       .ualong .LENDFDE-.LSTARTFDE             ! Length of the FDE.
+.LSTARTFDE:
+       .ualong .LSTARTFDE-.LSTARTFRAME         ! CIE pointer.
+#ifdef SHARED
+       .ualong .LSTARTCODE-.                   ! PC-relative start address
+                                               ! of the code.
+#else
+       .ualong .LSTARTCODE                     ! Start address of the code.
+#endif
+       .ualong .LENDCODE-.LSTARTCODE           ! Length of the code.
+       .uleb128 4                              ! Augmentation size
+#ifdef SHARED
+       .ualong .LexceptSTART-.
+#else
+       .ualong .LexceptSTART
+#endif
+
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r8-.LSTARTCODE
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 4
+       .byte   0x88                            ! DW_CFA_offset r8
+        .uleb128 1
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r9-.Lpush_r8
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 8
+       .byte   0x89                            ! DW_CFA_offset r9
+        .uleb128 2
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r10-.Lpush_r9
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 12
+       .byte   0x8a                            ! DW_CFA_offset r10
+        .uleb128 3
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r12-.Lpush_r10
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 16
+       .byte   0x8c                            ! DW_CFA_offset r12
+        .uleb128 4
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_pr-.Lpush_r12
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 20
+       .byte   0x91                            ! DW_CFA_offset pr
+       .uleb128 5
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lalloc-.Lpush_pr
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 28
+       .align  4
+.LENDFDE:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  4
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+       .long   __gcc_personality_v0
+#endif
index 00c61f3..b46eb1a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <lowlevellock.h>
 #include "lowlevel-atomic.h"
 
 
@@ -59,9 +60,9 @@ __new_sem_trywait:
        mov.l   .Lerrno1, r0
        stc     gbr, r1
        mov.l   @(r0, r12), r0
-       bra .Lexit
-       add     r1, r0
-       .align 2
+       bra     .Lexit
+        add    r1, r0
+       .align  2
 .Lerrno1:
        .long   errno@GOTTPOFF
 .Lexit:
index 7d13fa1..00a125b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
 #include <sysdep.h>
 #include <pthread-errnos.h>
 #include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
 #include "lowlevel-atomic.h"
 
 
-#define SYS_gettimeofday       __NR_gettimeofday
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
 
        .text
 
        .globl  __new_sem_wait
        .type   __new_sem_wait,@function
        .align  5
-       cfi_startproc
 __new_sem_wait:
-       /* First check for cancellation.  */
-       stc     gbr, r0
-       mov.w   .Lchand, r1
-       mov.l   @(r0,r1), r0
-       mov     #0xf9, r1
-       and     r1, r0
-       cmp/eq  #8, r0
-       bt      5f
-
+.LSTARTCODE:
        mov.l   r8, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r8, 0)
+.Lpush_r8:
        mov.l   r10, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r10, 0)
+.Lpush_r10:
        mov.l   r12, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (r12, 0)
+.Lpush_r12:
        sts.l   pr, @-r15
-       cfi_adjust_cfa_offset(4)
-       cfi_rel_offset (pr, 0)
+.Lpush_pr:
        mov     r4, r8
-3:
+
        mov.l   @r8, r0
 2:
        tst     r0, r0
@@ -66,10 +55,21 @@ __new_sem_wait:
        CMPXCHG (r4, @r8, r3, r2)
        bf/s    2b
         mov    r2, r0
-       bra     9f
-        mov    #0, r0
+7:
+       mov     #0, r0
+9:
+       lds.l   @r15+, pr
+       mov.l   @r15+, r12
+       mov.l   @r15+, r10
+       rts
+        mov.l  @r15+, r8
 
+.Lafter_ret:
 1:
+       INC (@(NWAITERS,r8),r2)
+
+.LcleanupSTART:
+6:
        mov.l   .Lenable0, r1
        bsrf    r1
         nop
@@ -77,7 +77,13 @@ __new_sem_wait:
        mov     r0, r10
 
        mov     r8, r4
-       mov     #FUTEX_WAIT, r5
+#if FUTEX_WAIT == 0
+       mov.l   @(PRIVATE,r8), r5
+#else
+       mov.l   @(PRIVATE,r8), r5
+       mov     #FUTEX_WAIT, r0
+       or      r0, r5
+#endif
        mov     #0, r6
        mov     #0, r7
        mov     #SYS_futex, r3
@@ -91,14 +97,35 @@ __new_sem_wait:
         mov    r0, r10
 .Ldisable0b:
        mov     r10, r0
+.LcleanupEND:
 
        tst     r0, r0
-       bt      3b
+       bt      3f
        cmp/eq  #-EWOULDBLOCK, r0
-       bt      3b
-       neg     r0, r0
+       bf      4f
+
+3:
+       mov.l   @r8, r0
+5:
+       tst     r0, r0
+       bt      6b
+
+       mov     r0, r3
+       mov     r0, r4
+       add     #-1, r3
+       CMPXCHG (r4, @r8, r3, r2)
+       bf/s    5b
+        mov    r2, r0
+
+       DEC (@(NWAITERS,r8), r2)
+       bra     7b
+        nop
 
-       mov     r0, r8
+4:
+       neg     r0, r0
+       mov     r0, r4
+       DEC (@(NWAITERS,r8), r2)
+       mov     r4, r8
        mova    .Lgot0, r0
        mov.l   .Lgot0, r12
        add     r0, r12
@@ -107,9 +134,9 @@ __new_sem_wait:
        mov.l   .Lerrno0, r0
        stc     gbr, r1
        mov.l   @(r0, r12), r0
-       bra .Lexit
-       add     r1, r0
-       .align 2
+       bra     .Lexit
+        add    r1, r0
+       .align  2
 .Lerrno0:
        .long   errno@GOTTPOFF
 .Lexit:
@@ -120,35 +147,9 @@ __new_sem_wait:
 .Lerrloc0b:
 #endif
        mov.l   r8, @r0
-       mov     #-1, r0
-9:
-       lds.l   @r15+, pr
-       mov.l   @r15+, r12
-       mov.l   @r15+, r10
-       rts
-        mov.l  @r15+, r8
-5:
-       /* Canceled.  */
-       stc     gbr, r0
-       mov.w   .Lresult, r1
-       mov     #-1, r2
-       mov.l   r2, @(r0,r1)
-       mov.w   .Lchand, r0
-       or.b    #0x10, @(r0,gbr)
-       stc     gbr, r0
-       mov.w   .Lclbuf, r1
-       mov.l   .Lunwind, r2
-       braf    r2
-        mov.l  @(r0,r1), r4
-.Lunwindb:
-       cfi_endproc
-
-.Lchand:
-       .word   CANCELHANDLING - TLS_PRE_TCB_SIZE
-.Lresult:
-       .word   RESULT - TLS_PRE_TCB_SIZE
-.Lclbuf:
-       .word   CLEANUP_JMP_BUF - TLS_PRE_TCB_SIZE
+       bra     9b
+        mov    #-1, r0
+
        .align  2
 .Lgot0:
        .long   _GLOBAL_OFFSET_TABLE_
@@ -160,7 +161,143 @@ __new_sem_wait:
        .long   __pthread_enable_asynccancel-.Lenable0b
 .Ldisable0:
        .long   __pthread_disable_asynccancel-.Ldisable0b
-.Lunwind:
-       .long   HIDDEN_JUMPTARGET (__pthread_unwind)-.Lunwindb
        .size   __new_sem_wait,.-__new_sem_wait
        weak_alias(__new_sem_wait, sem_wait)
+
+
+       .type   sem_wait_cleanup,@function
+sem_wait_cleanup:
+       DEC (@(NWAITERS,r8), r2)
+.LcallUR:
+       mov.l   .Lresume, r1
+#ifdef __PIC__
+       add     r12, r1
+#endif
+       jsr     @r1
+        nop
+       sleep
+
+       .align  2
+.Lresume:
+#ifdef __PIC__
+       .long   _Unwind_Resume@GOTOFF
+#else
+       .long   _Unwind_Resume
+#endif
+.LENDCODE:
+       .size   sem_wait_cleanup,.-sem_wait_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   0xff                            ! @LPStart format (omit)
+       .byte   0xff                            ! @TType format (omit)
+       .byte   0x01                            ! call-site format
+                                               ! DW_EH_PE_uleb128
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 sem_wait_cleanup-.LSTARTCODE
+       .uleb128  0
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+       .ualong .LENDCIE-.LSTARTCIE             ! Length of the CIE.
+.LSTARTCIE:
+       .ualong 0                               ! CIE ID.
+       .byte   1                               ! Version number.
+#ifdef SHARED
+       .string "zPLR"                          ! NUL-terminated augmentation
+                                               ! string.
+#else
+       .string "zPL"                           ! NUL-terminated augmentation
+                                               ! string.
+#endif
+       .uleb128 1                              ! Code alignment factor.
+       .sleb128 -4                             ! Data alignment factor.
+       .byte   0x11                            ! Return address register
+                                               ! column.
+#ifdef SHARED
+       .uleb128 7                              ! Augmentation value length.
+       .byte   0x9b                            ! Personality: DW_EH_PE_pcrel
+                                               ! + DW_EH_PE_sdata4
+                                               ! + DW_EH_PE_indirect
+       .ualong DW.ref.__gcc_personality_v0-.
+       .byte   0x1b                            ! LSDA Encoding: DW_EH_PE_pcrel
+                                               ! + DW_EH_PE_sdata4.
+       .byte   0x1b                            ! FDE Encoding: DW_EH_PE_pcrel
+                                               ! + DW_EH_PE_sdata4.
+#else
+       .uleb128 6                              ! Augmentation value length.
+       .byte   0x0                             ! Personality: absolute
+       .ualong __gcc_personality_v0
+       .byte   0x0                             ! LSDA Encoding: absolute
+#endif
+       .byte 0x0c                              ! DW_CFA_def_cfa
+       .uleb128 0xf
+       .uleb128 0
+       .align 4
+.LENDCIE:
+
+       .ualong .LENDFDE-.LSTARTFDE             ! Length of the FDE.
+.LSTARTFDE:
+       .ualong .LSTARTFDE-.LSTARTFRAME         ! CIE pointer.
+#ifdef SHARED
+       .ualong .LSTARTCODE-.                   ! PC-relative start address
+                                               ! of the code.
+#else
+       .ualong .LSTARTCODE                     ! Start address of the code.
+#endif
+       .ualong .LENDCODE-.LSTARTCODE           ! Length of the code.
+       .uleb128 4                              ! Augmentation size
+#ifdef SHARED
+       .ualong .LexceptSTART-.
+#else
+       .ualong .LexceptSTART
+#endif
+
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r8-.LSTARTCODE
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 4
+       .byte   0x88                            ! DW_CFA_offset r8
+        .uleb128 1
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r10-.Lpush_r8
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 8
+       .byte   0x8a                            ! DW_CFA_offset r10
+        .uleb128 2
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_r12-.Lpush_r10
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 12
+       .byte   0x8c                            ! DW_CFA_offset r12
+        .uleb128 3
+       .byte   4                               ! DW_CFA_advance_loc4
+       .ualong .Lpush_pr-.Lpush_r12
+       .byte   14                              ! DW_CFA_def_cfa_offset
+       .uleb128 16
+       .byte   0x91                            ! DW_CFA_offset pr
+        .uleb128 4
+       .align  4
+.LENDFDE:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  4
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+       .long   __gcc_personality_v0
+#endif
index dbaa443..ad2ca40 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
     .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
  .Lpseudo_cancel: \
     sts.l pr,@-r15; \
- .LCFI0: \
+    cfi_adjust_cfa_offset (4); \
+    cfi_rel_offset (pr, 0); \
     add _IMM16,r15; \
+    cfi_adjust_cfa_offset (16); \
     SAVE_ARGS_##args; \
- .LCFI1: \
     CENABLE; \
     LOAD_ARGS_##args; \
     add _IMP16,r15; \
.LCFI2: \
   cfi_adjust_cfa_offset (-16); \
     lds.l @r15+,pr; \
- .LCFI3: \
+    cfi_adjust_cfa_offset (-4); \
+    cfi_restore (pr); \
     DO_CALL(syscall_name, args); \
     SYSCALL_INST_PAD; \
     sts.l pr,@-r15; \
- .LCFI4: \
+    cfi_adjust_cfa_offset (4); \
+    cfi_rel_offset (pr, 0); \
     mov.l r0,@-r15; \
- .LCFI5: \
+    cfi_adjust_cfa_offset (4); \
+    cfi_rel_offset (r0, 0); \
     CDISABLE; \
     mov.l @r15+,r0; \
.LCFI6: \
   cfi_adjust_cfa_offset (-4); \
     lds.l @r15+,pr; \
- .LCFI7: \
+    cfi_adjust_cfa_offset (-4); \
+    cfi_restore (pr); \
     mov r0,r1; \
     mov _IMM12,r2; \
     shad r2,r1; \
     bf .Lpseudo_end; \
  .Lsyscall_error: \
     SYSCALL_ERROR_HANDLER; \
- .Lpseudo_end: \
- /* Create unwinding information for the syscall wrapper.  */ \
- .section .eh_frame,"a",@progbits; \
- .Lframe1: \
-    .ualong .LECIE1-.LSCIE1; \
- .LSCIE1: \
-    .ualong 0x0; \
-    .byte   0x1; \
-    AUGMENTATION_STRING; \
-    .uleb128 0x1; \
-    .sleb128 -4; \
-    .byte   0x11; \
-    AUGMENTATION_PARAM; \
-    .byte   0xc; \
-    .uleb128 0xf; \
-    .uleb128 0x0; \
-    .align 2; \
- .LECIE1: \
- .LSFDE1: \
-    .ualong .LEFDE1-.LASFDE1; \
- .LASFDE1: \
-    .ualong .LASFDE1-.Lframe1; \
-    START_SYMBOL_REF; \
-    .ualong .Lpseudo_end - .Lpseudo_start; \
-    AUGMENTATION_PARAM_FDE; \
-    .byte   0x4; \
-    .ualong .LCFI0-.Lpseudo_start; \
-    .byte   0xe; \
-    .uleb128 0x4; \
-    .byte   0x91; \
-    .uleb128 0x1; \
-    .byte   0x4; \
-    .ualong .LCFI1-.LCFI0; \
-    .byte   0xe; \
-    .uleb128 0x14; \
-    FRAME_REG_##args; \
-    .byte   0x4; \
-    .ualong .LCFI2-.LCFI1; \
-    .byte   0xe; \
-    .uleb128 0x4; \
-    .byte   0x4; \
-    .ualong .LCFI3-.LCFI2; \
-    .byte   0xe; \
-    .uleb128 0x0; \
-    .byte   0xd1; \
-    .byte   0x4; \
-    .ualong .LCFI4-.LCFI3; \
-    .byte   0xe; \
-    .uleb128 0x4; \
-    .byte   0x91; \
-    .uleb128 0x1; \
-    .byte   0x4; \
-    .ualong .LCFI5-.LCFI4; \
-    .byte   0xe; \
-    .uleb128 0x8; \
-    .byte   0x80; \
-    .uleb128 0x2; \
-    .byte   0x4; \
-    .ualong .LCFI6-.LCFI5; \
-    .byte   0xe; \
-    .uleb128 0x4; \
-    .byte   0xc0; \
-    .byte   0x4; \
-    .ualong .LCFI7-.LCFI6; \
-    .byte   0xe; \
-    .uleb128 0x0; \
-    .byte   0xd1; \
-    .align 2; \
- .LEFDE1: \
- .previous
-
-# ifdef SHARED
-#  define AUGMENTATION_STRING .string "zR"
-#  define AUGMENTATION_PARAM .uleb128 1; .byte 0x1b
-#  define AUGMENTATION_PARAM_FDE .uleb128 0
-#  define START_SYMBOL_REF .long .Lpseudo_start-.
-# else
-#  define AUGMENTATION_STRING .ascii "\0"
-#  define AUGMENTATION_PARAM
-#  define AUGMENTATION_PARAM_FDE
-#  define START_SYMBOL_REF .long .Lpseudo_start
-# endif
-
-# define FRAME_REG_0   /* Nothing.  */
-# define FRAME_REG_1   FRAME_REG_0; .byte 0x84; .uleb128 5
-# define FRAME_REG_2   FRAME_REG_1; .byte 0x85; .uleb128 4
-# define FRAME_REG_3   FRAME_REG_2; .byte 0x86; .uleb128 3
-# define FRAME_REG_4   FRAME_REG_3; .byte 0x87; .uleb128 2
-# define FRAME_REG_5   FRAME_REG_4
-# define FRAME_REG_6   FRAME_REG_5
+ .Lpseudo_end:
 
 # undef PSEUDO_END
 # define PSEUDO_END(sym) \
   END (sym)
 
 # define SAVE_ARGS_0   /* Nothing.  */
-# define SAVE_ARGS_1   SAVE_ARGS_0; mov.l r4,@(0,r15)
-# define SAVE_ARGS_2   SAVE_ARGS_1; mov.l r5,@(4,r15)
-# define SAVE_ARGS_3   SAVE_ARGS_2; mov.l r6,@(8,r15)
-# define SAVE_ARGS_4   SAVE_ARGS_3; mov.l r7,@(12,r15)
+# define SAVE_ARGS_1   SAVE_ARGS_0; mov.l r4,@(0,r15); cfi_offset (r4,-4)
+# define SAVE_ARGS_2   SAVE_ARGS_1; mov.l r5,@(4,r15); cfi_offset (r5,-8)
+# define SAVE_ARGS_3   SAVE_ARGS_2; mov.l r6,@(8,r15); cfi_offset (r6,-12)
+# define SAVE_ARGS_4   SAVE_ARGS_3; mov.l r7,@(12,r15); cfi_offset (r7,-16)
 # define SAVE_ARGS_5   SAVE_ARGS_4
 # define SAVE_ARGS_6   SAVE_ARGS_5
 
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index a45c09f..5433eac 100644 (file)
@@ -66,6 +66,6 @@ ENTRY (__vfork)
        .word   PID - TLS_PRE_TCB_SIZE
        .align  2
 PSEUDO_END (__vfork)
-hidden_def (vfork)
+libc_hidden_def (__vfork)
 
 weak_alias (__vfork, vfork)
index 41fa97f..fcc34f7 100644 (file)
@@ -1,5 +1,5 @@
 /* Determine whether the host has multiple processors.  Linux version.
-   Copyright (C) 1996, 2002, 2004 Free Software Foundation, Inc.
+   Copyright (C) 1996, 2002, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/sysctl.h>
-#include <not-cancel.h>
-
 /* Test whether the machine has more than one processor.  This is not the
    best test but good enough.  More complicated tests would require `malloc'
    which is not available at that time.  */
 static inline int
 is_smp_system (void)
 {
-  static const int sysctl_args[] = { CTL_KERN, KERN_VERSION };
-  char buf[512];
-  size_t reslen = sizeof (buf);
-
-  /* Try reading the number using `sysctl' first.  */
-  if (sysctl ((int *) sysctl_args,
-               sizeof (sysctl_args) / sizeof (sysctl_args[0]),
-               buf, &reslen, NULL, 0) < 0)
-    {
-      /* This was not successful.  Now try reading the /proc filesystem.  */
-      int fd = open_not_cancel_2 ("/proc/sys/kernel/version", O_RDONLY);
-      if (__builtin_expect (fd, 0) == -1
-         || (reslen = read_not_cancel (fd, buf, sizeof (buf))) <= 0)
-       /* This also didn't work.  We give up and say it's a UP machine.  */
-       buf[0] = '\0';
-
-      close_not_cancel_no_status (fd);
-    }
-
-  return strstr (buf, "SMP") != NULL;
+  /* Assume all machines are SMP and/or CMT and/or SMT.  */
+  return 1;
 }
index b547310..b6df6dc 100644 (file)
@@ -6,7 +6,8 @@
 #
 
 libpthread_SSRC = pt-vfork.S clone.S
-libpthread_CSRC = pthread_once.c lowlevellock.c
+libpthread_CSRC = pthread_once.c lowlevellock.c \
+                                 pthread_barrier_init.c pthread_barrier_wait.c pthread_barrier_destroy.c
 
 libc_a_CSRC = fork.c libc-lowlevellock.c
 libc_a_SSRC = clone.S vfork.S
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions
deleted file mode 100644 (file)
index d102772..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-libpthread {
-  GLIBC_2.3.3 {
-    # Changed PTHREAD_STACK_MIN.
-    pthread_attr_setstack; pthread_attr_setstacksize;
-  }
-}
index e082ea8..6e35603 100644 (file)
@@ -1,5 +1,5 @@
 /* Minimum guaranteed maximum values for system limits.  Linux/SPARC version.
-   Copyright (C) 1993-1998,2000,2002,2003,2004 Free Software Foundation, Inc.
+   Copyright (C) 1993-1998,2000,2002-2004,2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -31,6 +31,9 @@
 #ifndef OPEN_MAX
 # define __undef_OPEN_MAX
 #endif
+#ifndef ARG_MAX
+# define __undef_ARG_MAX
+#endif
 
 /* The kernel sources contain a file with all the needed information.  */
 #include <linux/limits.h>
 # undef OPEN_MAX
 # undef __undef_OPEN_MAX
 #endif
+/* Have to remove ARG_MAX?  */
+#ifdef __undef_ARG_MAX
+# undef ARG_MAX
+# undef __undef_ARG_MAX
+#endif
 
 /* The number of data keys per process.  */
 #define _POSIX_THREAD_KEYS_MAX 128
@@ -87,3 +95,6 @@
 
 /* Maximum message queue priority level.  */
 #define MQ_PRIO_MAX            32768
+
+/* Maximum value the semaphore can have.  */
+#define SEM_VALUE_MAX   (2147483647)
index 459d1ca..faf0584 100644 (file)
@@ -1,5 +1,5 @@
 /* Machine-specific pthread type layouts.  SPARC version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -90,7 +90,7 @@ typedef union
 #if __WORDSIZE == 64
     int __spins;
     __pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV      1
+# define __PTHREAD_MUTEX_HAVE_PREV     1
 #else
     unsigned int __nusers;
     __extension__ union
@@ -160,9 +160,9 @@ typedef union
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
     int __writer;
-    int __pad1;
+    int __shared;
+    unsigned long int __pad1;
     unsigned long int __pad2;
-    unsigned long int __pad3;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
     unsigned int __flags;
@@ -176,9 +176,12 @@ typedef union
     unsigned int __writer_wakeup;
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
+    unsigned char __pad1;
+    unsigned char __pad2;
+    unsigned char __shared;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
     int __writer;
   } __data;
 # endif
index 7f3a328..8fd7d34 100644 (file)
@@ -33,9 +33,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h
new file mode 100644 (file)
index 0000000..4f400a3
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef _INTERNALTYPES_H
+#include "../internaltypes.h"
+
+union sparc_pthread_barrier
+{
+  struct pthread_barrier b;
+  struct sparc_pthread_barrier_s
+    {
+      unsigned int curr_event;
+      int lock;
+      unsigned int left;
+      unsigned int init_count;
+      unsigned char left_lock;
+      unsigned char pshared;
+    } s;
+};
+
+struct sparc_new_sem
+{
+  unsigned int value;
+  unsigned char lock;
+  unsigned char private;
+  unsigned char pad[2];
+  unsigned long int nwaiters;
+};
+
+struct sparc_old_sem
+{
+  unsigned int value;
+  unsigned char lock;
+  unsigned char private;
+};
+
+#endif
index e9ec4df..80b0e76 100644 (file)
 #include <sys/time.h>
 
 
-/* These functions don't get included in libc.so  */
 void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
 {
   do
     {
       int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
       if (oldval != 0)
-       lll_futex_wait (futex, 2);
+       lll_futex_wait (futex, 2, LLL_PRIVATE);
     }
   while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
 }
 
 
+/* These functions don't get included in libc.so  */
 #ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+  do
+    {
+      int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
+      if (oldval != 0)
+       lll_futex_wait (futex, 2, private);
+    }
+  while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
+}
+
+
 int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
 {
   /* Reject invalid timeouts.  */
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -70,25 +83,13 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
       /* Wait.  */
       int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
       if (oldval != 0)
-       lll_futex_timed_wait (futex, 2, &rt);
+       lll_futex_timed_wait (futex, 2, &rt, private);
     }
   while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
 
   return 0;
 }
 
-int
-lll_unlock_wake_cb(int* futex)
-{
-    int val = atomic_exchange_24_rel(futex, 0);
-
-    if( __builtin_expect( val > 1, 0 ) ) {
-        lll_futex_wake( futex, 1 );
-    }
-
-    return 0;
-}
-
 
 int
 __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
@@ -122,7 +123,7 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
 
       /* Wait until thread terminates.  The kernel so far does not use
         the private futex operations for this.  */
-      if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT)
+      if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
        return ETIMEDOUT;
     }
 
index 4db6fb0..a43f6b6 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -24,7 +25,7 @@
 #include <sys/param.h>
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
-#include <sysdep.h>
+#include <bits/kernel-features.h>
 
 
 #define FUTEX_WAIT             0
 #define FUTEX_CMP_REQUEUE      4
 #define FUTEX_WAKE_OP          5
 #define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                               \
+             & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
 
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
 
-#define lll_futex_wait(futexp, val) \
-  lll_futex_timed_wait (futexp, val, NULL)
+#define lll_futex_wait(futexp, val, private) \
+  lll_futex_timed_wait (futexp, val, NULL, private)
 
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                 \
-                             FUTEX_WAIT, (val), (timespec));                         \
+                                                                             \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
+                             __lll_private_flag (FUTEX_WAIT, private),       \
+                             (val), (timespec));                             \
     __ret;                                                                   \
   })
 
-#define lll_futex_wake(futexp, nr) \
+#define lll_futex_wake(futexp, nr, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
+                                                                             \
     __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),                     \
-                             FUTEX_WAKE, (nr), 0);                                           \
+                             __lll_private_flag (FUTEX_WAKE, private),       \
+                             (nr), 0);                                       \
     __ret;                                                                   \
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
+                                                                             \
     __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
-                             FUTEX_CMP_REQUEUE, (nr_wake), (nr_move), (mutex), (val));       \
+                             __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+                             (nr_wake), (nr_move), (mutex), (val));          \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
   })
 
-#define lll_robust_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
   do                                                                         \
     {                                                                        \
       int *__futexp = &(futexv);                                             \
       atomic_or (__futexp, FUTEX_OWNER_DIED);                                \
-      lll_futex_wake (__futexp, 1);                                  \
+      lll_futex_wake (__futexp, 1, private);                                 \
     }                                                                        \
   while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
 #ifdef __sparc32_atomic_do_lock
 /* Avoid FUTEX_WAKE_OP if supporting pre-v9 CPUs.  */
-# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) 1
+# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) 1
 #else
-# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
+# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
   ({                                                                         \
     INTERNAL_SYSCALL_DECL (__err);                                           \
     long int __ret;                                                          \
                                                                              \
     __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                     \
-                             FUTEX_WAKE_OP,    \
+                             __lll_private_flag (FUTEX_WAKE_OP, private),    \
                              (nr_wake), (nr_wake2), (futexp2),               \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);                 \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);                                 \
@@ -101,7 +147,7 @@ __lll_trylock (int *futex)
 {
   return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
 }
-#define lll_mutex_trylock(futex) __lll_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
 
 static inline int
 __attribute__ ((always_inline))
@@ -109,7 +155,7 @@ __lll_cond_trylock (int *futex)
 {
   return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
 }
-#define lll_mutex_cond_trylock(futex) __lll_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
 
 static inline int
 __attribute__ ((always_inline))
@@ -121,116 +167,108 @@ __lll_robust_trylock (int *futex, int id)
   __lll_robust_trylock (&(futex), id)
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 static inline void
 __attribute__ ((always_inline))
-__lll_lock (int *futex)
+__lll_lock (int *futex, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
 
   if (__builtin_expect (val != 0, 0))
     {
-       __lll_lock_wait (futex);
+      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+       __lll_lock_wait_private (futex);
+      else
+       __lll_lock_wait (futex, private);
     }
 }
-#define lll_mutex_lock(futex) __lll_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_lock_wait (futex);
+    result = __lll_robust_lock_wait (futex, private);
   return result;
 }
-#define lll_robust_lock(futex, id) \
-  __lll_robust_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 static inline void
 __attribute__ ((always_inline))
-__lll_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
 
   if (__builtin_expect (val != 0, 0))
-    __lll_lock_wait (futex);
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
 
-#define lll_robust_cond_lock(futex, id) \
-  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *) attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+                                int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+                                       int private) attribute_hidden;
 
 static inline int
 __attribute__ ((always_inline))
-__lll_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
   int result = 0;
 
   if (__builtin_expect (val != 0, 0))
-    result = __lll_timedlock_wait (futex, abstime);
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 static inline int
 __attribute__ ((always_inline))
 __lll_robust_timedlock (int *futex, const struct timespec *abstime,
-                       int id)
+                       int id, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_timedlock_wait (futex, abstime);
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_robust_timedlock(futex, abstime, id) \
-  __lll_robust_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
 
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
   ((void) ({                                                                 \
     int *__futex = &(lock);                                                  \
     int __val = atomic_exchange_24_rel (__futex, 0);                         \
     if (__builtin_expect (__val > 1, 0))                                     \
-      lll_futex_wake (__futex, 1);                                   \
+      lll_futex_wake (__futex, 1, private);                                  \
   }))
 
-#define lll_robust_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
   ((void) ({                                                                 \
     int *__futex = &(lock);                                                  \
     int __val = atomic_exchange_rel (__futex, 0);                            \
     if (__builtin_expect (__val & FUTEX_WAITERS, 0))                         \
-      lll_futex_wake (__futex, 1);                                   \
+      lll_futex_wake (__futex, 1, private);                                  \
   }))
 
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER           (0)
 #define LLL_LOCK_INITIALIZER_LOCKED    (1)
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-#define lll_trylock(lock)      lll_mutex_trylock (lock)
-#define lll_lock(lock)         lll_mutex_lock (lock)
-#define lll_unlock(lock)       lll_mutex_unlock (lock)
-#define lll_islocked(lock)     lll_mutex_islocked (lock)
-
-
 /* The kernel notifies a process with uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -240,7 +278,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
     {                                                  \
       __typeof (tid) __tid;                            \
       while ((__tid = (tid)) != 0)                     \
-       lll_futex_wait (&(tid), __tid); \
+       lll_futex_wait (&(tid), __tid, LLL_SHARED);     \
     }                                                  \
   while (0)
 
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h
new file mode 100644 (file)
index 0000000..acf1a61
--- /dev/null
@@ -0,0 +1 @@
+#include "../i386/not-cancel.h"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
new file mode 100644 (file)
index 0000000..ca96379
--- /dev/null
@@ -0,0 +1,45 @@
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_destroy (
+     pthread_barrier_t *barrier)
+{
+  union sparc_pthread_barrier *ibarrier;
+  int result = EBUSY;
+
+  ibarrier = (union sparc_pthread_barrier *) barrier;
+
+  int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+  lll_lock (ibarrier->b.lock, private);
+
+  if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
+    /* The barrier is not used anymore.  */
+    result = 0;
+  else
+    /* Still used, return with an error.  */
+    lll_unlock (ibarrier->b.lock, private);
+
+  return result;
+}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
new file mode 100644 (file)
index 0000000..6ca4727
--- /dev/null
@@ -0,0 +1,55 @@
+/* Copyright (C) 2002, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_init (
+     pthread_barrier_t *barrier,
+     const pthread_barrierattr_t *attr,
+     unsigned int count)
+{
+  union sparc_pthread_barrier *ibarrier;
+
+  if (__builtin_expect (count == 0, 0))
+    return EINVAL;
+
+  struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
+  if (iattr != NULL)
+    {
+      if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+         && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
+       /* Invalid attribute.  */
+       return EINVAL;
+    }
+
+  ibarrier = (union sparc_pthread_barrier *) barrier;
+
+  /* Initialize the individual fields.  */
+  ibarrier->b.lock = LLL_LOCK_INITIALIZER;
+  ibarrier->b.left = count;
+  ibarrier->b.init_count = count;
+  ibarrier->b.curr_event = 0;
+  ibarrier->s.left_lock = 0;
+  ibarrier->s.pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED);
+
+  return 0;
+}
index 3b07cc1..22e2dd3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
   pthread_once_t *once_control = (pthread_once_t *) arg;
 
   *once_control = 0;
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 }
 
 
@@ -65,7 +65,7 @@ __pthread_once (once_control, init_routine)
          if (((oldval ^ newval) & -4) == 0)
            {
              /* Same generation, some other thread was faster. Wait.  */
-             lll_futex_wait (once_control, newval);
+             lll_futex_wait (once_control, newval, LLL_PRIVATE);
              continue;
            }
        }
@@ -84,7 +84,7 @@ __pthread_once (once_control, init_routine)
       atomic_increment (once_control);
 
       /* Wake up all other threads.  */
-      lll_futex_wake (once_control, INT_MAX);
+      lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
       break;
     }
 
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c
new file mode 100644 (file)
index 0000000..f694b5e
--- /dev/null
@@ -0,0 +1,57 @@
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <string.h>
+#include <semaphore.h>
+#include <lowlevellock.h>
+#include "semaphoreP.h"
+#include <bits/kernel-features.h>
+
+
+int
+__new_sem_init (sem, pshared, value)
+     sem_t *sem;
+     int pshared;
+     unsigned int value;
+{
+  /* Parameter sanity check.  */
+  if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  /* Map to the internal type.  */
+  struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+
+  /* Use the values the user provided.  */
+  memset (isem, '\0', sizeof (*isem));
+  isem->value = value;
+#ifdef __ASSUME_PRIVATE_FUTEX
+  isem->private = pshared ? 0 : FUTEX_PRIVATE_FLAG;
+#else
+  isem->private = pshared ? 0 : THREAD_GETMEM (THREAD_SELF,
+                                              header.private_futex);
+#endif
+
+  return 0;
+}
+weak_alias(__new_sem_init, sem_init)
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
new file mode 100644 (file)
index 0000000..302d1b3
--- /dev/null
@@ -0,0 +1,94 @@
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthreadP.h>
+
+/* Wait on barrier.  */
+int
+pthread_barrier_wait (
+     pthread_barrier_t *barrier)
+{
+  union sparc_pthread_barrier *ibarrier
+    = (union sparc_pthread_barrier *) barrier;
+  int result = 0;
+  int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+  /* Make sure we are alone.  */
+  lll_lock (ibarrier->b.lock, private);
+
+  /* One more arrival.  */
+  --ibarrier->b.left;
+
+  /* Are these all?  */
+  if (ibarrier->b.left == 0)
+    {
+      /* Yes. Increment the event counter to avoid invalid wake-ups and
+        tell the current waiters that it is their turn.  */
+      ++ibarrier->b.curr_event;
+
+      /* Wake up everybody.  */
+      lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
+
+      /* This is the thread which finished the serialization.  */
+      result = PTHREAD_BARRIER_SERIAL_THREAD;
+    }
+  else
+    {
+      /* The number of the event we are waiting for.  The barrier's event
+        number must be bumped before we continue.  */
+      unsigned int event = ibarrier->b.curr_event;
+
+      /* Before suspending, make the barrier available to others.  */
+      lll_unlock (ibarrier->b.lock, private);
+
+      /* Wait for the event counter of the barrier to change.  */
+      do
+       lll_futex_wait (&ibarrier->b.curr_event, event, private);
+      while (event == ibarrier->b.curr_event);
+    }
+
+  /* Make sure the init_count is stored locally or in a register.  */
+  unsigned int init_count = ibarrier->b.init_count;
+
+  /* If this was the last woken thread, unlock.  */
+  if (__atomic_is_v9 || ibarrier->s.pshared == 0)
+    {
+      if (atomic_increment_val (&ibarrier->b.left) == init_count)
+       /* We are done.  */
+       lll_unlock (ibarrier->b.lock, private);
+    }
+  else
+    {
+      unsigned int left;
+      /* Slightly more complicated.  On pre-v9 CPUs, atomic_increment_val
+        is only atomic for threads within the same process, not for
+        multiple processes.  */
+      __sparc32_atomic_do_lock24 (&ibarrier->s.left_lock);
+      left = ++ibarrier->b.left;
+      __sparc32_atomic_do_unlock24 (&ibarrier->s.left_lock);
+      if (left == init_count)
+        /* We are done.  */
+       lll_unlock (ibarrier->b.lock, private);
+    }
+
+  return result;
+}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c
new file mode 100644 (file)
index 0000000..940728e
--- /dev/null
@@ -0,0 +1,55 @@
+/* sem_post -- post to a POSIX semaphore.  SPARC version.
+   Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+int
+__new_sem_post (sem_t *sem)
+{
+  struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+  int nr;
+
+  if (__atomic_is_v9)
+    nr = atomic_increment_val (&isem->value);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      nr = ++(isem->value);
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+  atomic_full_barrier ();
+  if (isem->nwaiters > 0)
+    {
+      int err = lll_futex_wake (&isem->value, 1,
+                               isem->private ^ FUTEX_PRIVATE_FLAG);
+      if (__builtin_expect (err, 0) < 0)
+       {
+         __set_errno (-err);
+         return -1;
+       }
+    }
+  return 0;
+}
+weak_alias(__new_sem_post, sem_post)
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
new file mode 100644 (file)
index 0000000..aa5bd80
--- /dev/null
@@ -0,0 +1,148 @@
+/* sem_timedwait -- wait on a semaphore.  SPARC version.
+   Copyright (C) 2003, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+#include <pthreadP.h>
+
+
+extern void __sem_wait_cleanup (void *arg) attribute_hidden;
+
+
+int
+sem_timedwait (sem_t *sem, const struct timespec *abstime)
+{
+  struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+  int err;
+  int val;
+
+  if (__atomic_is_v9)
+    val = atomic_decrement_if_positive (&isem->value);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      val = isem->value;
+      if (val > 0)
+        isem->value = val - 1;
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+
+  if (val > 0)
+    return 0;
+
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    {
+      __set_errno (EINVAL);
+      return -1;
+    }
+
+  if (__atomic_is_v9)
+    atomic_increment (&isem->nwaiters);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      isem->nwaiters++;
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+  while (1)
+    {
+      struct timeval tv;
+      struct timespec rt;
+      int sec, nsec;
+
+      /* Get the current time.  */
+      __gettimeofday (&tv, NULL);
+
+      /* Compute relative timeout.  */
+      sec = abstime->tv_sec - tv.tv_sec;
+      nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+      if (nsec < 0)
+       {
+         nsec += 1000000000;
+         --sec;
+       }
+
+      /* Already timed out?  */
+      err = -ETIMEDOUT;
+      if (sec < 0)
+       {
+         __set_errno (ETIMEDOUT);
+         err = -1;
+         break;
+       }
+
+      /* Do wait.  */
+      rt.tv_sec = sec;
+      rt.tv_nsec = nsec;
+
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+      int oldtype = __pthread_enable_asynccancel ();
+
+      err = lll_futex_timed_wait (&isem->value, 0, &rt,
+                                 isem->private ^ FUTEX_PRIVATE_FLAG);
+
+      /* Disable asynchronous cancellation.  */
+      __pthread_disable_asynccancel (oldtype);
+
+      if (err != 0 && err != -EWOULDBLOCK)
+       {
+         __set_errno (-err);
+         err = -1;
+         break;
+       }
+
+      if (__atomic_is_v9)
+       val = atomic_decrement_if_positive (&isem->value);
+      else
+       {
+         __sparc32_atomic_do_lock24 (&isem->lock);
+         val = isem->value;
+         if (val > 0)
+           isem->value = val - 1;
+         __sparc32_atomic_do_unlock24 (&isem->lock);
+       }
+
+      if (val > 0)
+       {
+         err = 0;
+         break;
+       }
+    }
+
+  pthread_cleanup_pop (0);
+
+  if (__atomic_is_v9)
+    atomic_decrement (&isem->nwaiters);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      isem->nwaiters--;
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+
+  return err;
+}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c
new file mode 100644 (file)
index 0000000..d4e8938
--- /dev/null
@@ -0,0 +1,54 @@
+/* sem_trywait -- wait on a semaphore.  SPARC version.
+   Copyright (C) 2003, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+
+int
+__new_sem_trywait (sem_t *sem)
+{
+  struct sparc_old_sem *isem = (struct sparc_old_sem *) sem;
+  int val;
+
+  if (isem->value > 0)
+    {
+      if (__atomic_is_v9)
+       val = atomic_decrement_if_positive (&isem->value);
+      else
+       {
+         __sparc32_atomic_do_lock24 (&isem->lock);
+         val = isem->value;
+         if (val > 0)
+           isem->value = val - 1;
+         __sparc32_atomic_do_unlock24 (&isem->lock);
+       }
+      if (val > 0)
+       return 0;
+    }
+
+  __set_errno (EAGAIN);
+  return -1;
+}
+weak_alias(__new_sem_trywait, sem_trywait)
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c
new file mode 100644 (file)
index 0000000..cfe04a8
--- /dev/null
@@ -0,0 +1,127 @@
+/* sem_wait -- wait on a semaphore.  Generic futex-using version.
+   Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+#include <pthreadP.h>
+
+
+void
+attribute_hidden
+__sem_wait_cleanup (void *arg)
+{
+  struct sparc_new_sem *isem = (struct sparc_new_sem *) arg;
+
+  if (__atomic_is_v9)
+    atomic_decrement (&isem->nwaiters);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      isem->nwaiters--;
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+}
+
+
+int
+__new_sem_wait (sem_t *sem)
+{
+  struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+  int err;
+  int val;
+
+  if (__atomic_is_v9)
+    val = atomic_decrement_if_positive (&isem->value);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      val = isem->value;
+      if (val > 0)
+       isem->value = val - 1;
+      else
+       isem->nwaiters++;
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+
+  if (val > 0)
+    return 0;
+
+  if (__atomic_is_v9)
+    atomic_increment (&isem->nwaiters);
+  else
+    /* Already done above while still holding isem->lock.  */;
+
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+  while (1)
+    {
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+      int oldtype = __pthread_enable_asynccancel ();
+
+      err = lll_futex_wait (&isem->value, 0,
+                           isem->private ^ FUTEX_PRIVATE_FLAG);
+
+      /* Disable asynchronous cancellation.  */
+      __pthread_disable_asynccancel (oldtype);
+
+      if (err != 0 && err != -EWOULDBLOCK)
+       {
+         __set_errno (-err);
+         err = -1;
+         break;
+       }
+
+      if (__atomic_is_v9)
+       val = atomic_decrement_if_positive (&isem->value);
+      else
+       {
+         __sparc32_atomic_do_lock24 (&isem->lock);
+         val = isem->value;
+         if (val > 0)
+           isem->value = val - 1;
+         __sparc32_atomic_do_unlock24 (&isem->lock);
+       }
+
+      if (val > 0)
+       {
+         err = 0;
+         break;
+       }
+    }
+
+  pthread_cleanup_pop (0);
+
+  if (__atomic_is_v9)
+    atomic_decrement (&isem->nwaiters);
+  else
+    {
+      __sparc32_atomic_do_lock24 (&isem->lock);
+      isem->nwaiters--;
+      __sparc32_atomic_do_unlock24 (&isem->lock);
+    }
+
+  return err;
+}
+weak_alias(__new_sem_wait, sem_wait)
+
index ad650d0..1f55bd6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
 
 #if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
 
 # undef PSEUDO
-# define PSEUDO(name, syscall_name, args)   \
-        .text;                  \
-    .globl      __syscall_error;    \
-ENTRY(name)                 \
-    ld [%g7 + MULTIPLE_THREADS_OFFSET], %g1;\
-    cmp %g1, 0;             \
-    bne 1f;                 \
-.type   __##syscall_name##_nocancel,@function;  \
-.globl  __##syscall_name##_nocancel;        \
-__##syscall_name##_nocancel:            \
-     mov SYS_ify(syscall_name), %g1;    \
-    ta 0x10;                \
-    bcc 8f;                 \
-     mov %o7, %g1;              \
-    call __syscall_error;           \
-     mov %g1, %o7;              \
-8:  jmpl %o7 + 8, %g0;          \
-     nop;                   \
-.size   __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;\
-1:  save %sp, -96, %sp;         \
-    cfi_def_cfa_register(%fp);      \
-    cfi_window_save;            \
-    cfi_register(%o7, %i7);         \
-    CENABLE;                \
-     nop;                   \
-    mov %o0, %l0;               \
-    COPY_ARGS_##args            \
-    mov SYS_ify(syscall_name), %g1;     \
-    ta 0x10;                \
-    bcc 1f;                 \
-     mov %o0, %l1;              \
-    CDISABLE;               \
-     mov %l0, %o0;              \
-    call __syscall_error;           \
-     mov %l1, %o0;              \
-    b 2f;                   \
-     mov -1, %l1;               \
-1:  CDISABLE;               \
-     mov %l0, %o0;              \
-2:  jmpl %i7 + 8, %g0;          \
-     restore %g0, %l1, %o0;
+# define PSEUDO(name, syscall_name, args)      \
+       .text;                                  \
+       .globl          __syscall_error;        \
+ENTRY(name)                                    \
+       ld [%g7 + MULTIPLE_THREADS_OFFSET], %g1;\
+       cmp %g1, 0;                             \
+       bne 1f;                                 \
+.type  __##syscall_name##_nocancel,@function;  \
+.globl __##syscall_name##_nocancel;            \
+__##syscall_name##_nocancel:                   \
+        mov SYS_ify(syscall_name), %g1;        \
+       ta 0x10;                                \
+       bcc 8f;                                 \
+        mov %o7, %g1;                          \
+       call __syscall_error;                   \
+        mov %g1, %o7;                          \
+8:     jmpl %o7 + 8, %g0;                      \
+        nop;                                   \
+.size  __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;\
+1:     save %sp, -96, %sp;                     \
+       cfi_def_cfa_register(%fp);              \
+       cfi_window_save;                        \
+       cfi_register(%o7, %i7);                 \
+       CENABLE;                                \
+        nop;                                   \
+       mov %o0, %l0;                           \
+       COPY_ARGS_##args                        \
+       mov SYS_ify(syscall_name), %g1;         \
+       ta 0x10;                                \
+       bcc 1f;                                 \
+        mov %o0, %l1;                          \
+       CDISABLE;                               \
+        mov %l0, %o0;                          \
+       call __syscall_error;                   \
+        mov %l1, %o0;                          \
+       b 2f;                                   \
+        mov -1, %l1;                           \
+1:     CDISABLE;                               \
+        mov %l0, %o0;                          \
+2:     jmpl %i7 + 8, %g0;                      \
+        restore %g0, %l1, %o0;
+
 
 # ifdef IS_IN_libpthread
 #  define CENABLE      call __pthread_enable_asynccancel
@@ -103,3 +104,9 @@ __##syscall_name##_nocancel:            \
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+                                  header.multiple_threads) == 0, 1)
+#endif
index 1a38277..a8e4dd5 100644 (file)
@@ -45,5 +45,5 @@ ENTRY(__vfork)
         nop
 END(__vfork)
 
-hidden_def (vfork)
+libc_hidden_def (__vfork)
 weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions
deleted file mode 100644 (file)
index 3b111dd..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
-  GLIBC_2.3.3 {
-    # Changed timer_t.
-    timer_create; timer_delete; timer_getoverrun; timer_gettime;
-    timer_settime;
-  }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym
new file mode 100644 (file)
index 0000000..0e2a15f
--- /dev/null
@@ -0,0 +1,12 @@
+#include <limits.h>
+#include <stddef.h>
+#include <sched.h>
+#include <bits/pthreadtypes.h>
+#include "internaltypes.h"
+
+--
+
+VALUE          offsetof (struct new_sem, value)
+PRIVATE                offsetof (struct new_sem, private)
+NWAITERS       offsetof (struct new_sem, nwaiters)
+SEM_VALUE_MAX  SEM_VALUE_MAX
index 9f02fe3..a7da2a0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003,2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003,2004, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -167,6 +167,7 @@ timer_create (
              /* Copy the thread parameters the user provided.  */
              newp->sival = evp->sigev_value;
              newp->thrfunc = evp->sigev_notify_function;
+             newp->sigev_notify = SIGEV_THREAD;
 
              /* We cannot simply copy the thread attributes since the
                 implementation might keep internal information for
@@ -193,12 +194,11 @@ timer_create (
                                                  PTHREAD_CREATE_DETACHED);
 
              /* Create the event structure for the kernel timer.  */
-             struct sigevent sev;
-             sev.sigev_value.sival_ptr = newp;
-             sev.sigev_signo = SIGTIMER;
-             sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
-             /* This is the thread ID of the helper thread.  */
-             sev._sigev_un._pad[0] = __helper_tid;
+             struct sigevent sev =
+               { .sigev_value.sival_ptr = newp,
+                 .sigev_signo = SIGTIMER,
+                 .sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID,
+                 ._sigev_un = { ._pad = { [0] = __helper_tid } } };
 
              /* Create the timer.  */
              INTERNAL_SYSCALL_DECL (err);
@@ -207,6 +207,13 @@ timer_create (
                                      syscall_clockid, &sev, &newp->ktimerid);
              if (! INTERNAL_SYSCALL_ERROR_P (res, err))
                {
+                 /* Add to the queue of active timers with thread
+                    delivery.  */
+                 pthread_mutex_lock (&__active_timer_sigev_thread_lock);
+                 newp->next = __active_timer_sigev_thread;
+                 __active_timer_sigev_thread = newp;
+                 pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
+
                  *timerid = (timer_t) newp;
                  return 0;
                }
index 9b92446..5ad40b9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -39,7 +39,8 @@ static int compat_timer_delete (timer_t timerid);
 
 
 int
-timer_delete (timer_t timerid)
+timer_delete (
+     timer_t timerid)
 {
 # undef timer_delete
 # ifndef __ASSUME_POSIX_TIMERS
@@ -53,6 +54,27 @@ timer_delete (timer_t timerid)
 
       if (res == 0)
        {
+         if (kt->sigev_notify == SIGEV_THREAD)
+           {
+             /* Remove the timer from the list.  */
+             pthread_mutex_lock (&__active_timer_sigev_thread_lock);
+             if (__active_timer_sigev_thread == kt)
+               __active_timer_sigev_thread = kt->next;
+             else
+               {
+                 struct timer *prevp = __active_timer_sigev_thread;
+                 while (prevp->next != NULL)
+                   if (prevp->next == kt)
+                     {
+                       prevp->next = kt->next;
+                       break;
+                     }
+                   else
+                     prevp = prevp->next;
+               }
+             pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
+           }
+
 # ifndef __ASSUME_POSIX_TIMERS
          /* We know the syscall support is available.  */
          __no_posix_timers = 1;
index 7afc5ec..62a558a 100644 (file)
@@ -38,7 +38,8 @@ static int compat_timer_getoverrun (timer_t timerid);
 
 
 int
-timer_getoverrun (timer_t timerid)
+timer_getoverrun (
+     timer_t timerid)
 {
 # undef timer_getoverrun
 # ifndef __ASSUME_POSIX_TIMERS
index 924c524..2681961 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
 #include "kernel-posix-timers.h"
 
 
+/* List of active SIGEV_THREAD timers.  */
+struct timer *__active_timer_sigev_thread;
+/* Lock protecting __active_timer_sigev_thread.  */
+pthread_mutex_t __active_timer_sigev_thread_lock = PTHREAD_MUTEX_INITIALIZER;
+
+
+struct thread_start_data
+{
+  void (*thrfunc) (sigval_t);
+  sigval_t sival;
+};
+
+
 #ifdef __NR_timer_create
 /* Helper thread to call the user-provided function.  */
 static void *
@@ -40,10 +53,16 @@ timer_sigev_thread (void *arg)
   INTERNAL_SYSCALL_DECL (err);
   INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, NULL, _NSIG / 8);
 
-  struct timer *tk = (struct timer *) arg;
+  struct thread_start_data *td = (struct thread_start_data *) arg;
+
+  void (*thrfunc) (sigval_t) = td->thrfunc;
+  sigval_t sival = td->sival;
+
+  /* The TD object was allocated in timer_helper_thread.  */
+  free (td);
 
   /* Call the user-provided function.  */
-  tk->thrfunc (tk->sival);
+  thrfunc (sival);
 
   return NULL;
 }
@@ -83,9 +102,35 @@ timer_helper_thread (void *arg)
            {
              struct timer *tk = (struct timer *) si.si_ptr;
 
-             /* That the signal we are waiting for.  */
-             pthread_t th;
-             (void) pthread_create (&th, &tk->attr, timer_sigev_thread, tk);
+             /* Check the timer is still used and will not go away
+                while we are reading the values here.  */
+             pthread_mutex_lock (&__active_timer_sigev_thread_lock);
+
+             struct timer *runp = __active_timer_sigev_thread;
+             while (runp != NULL)
+               if (runp == tk)
+                 break;
+               else
+                 runp = runp->next;
+
+             if (runp != NULL)
+               {
+                 struct thread_start_data *td = malloc (sizeof (*td));
+
+                 /* There is not much we can do if the allocation fails.  */
+                 if (td != NULL)
+                   {
+                     /* This is the signal we are waiting for.  */
+                     td->thrfunc = tk->thrfunc;
+                     td->sival = tk->sival;
+
+                     pthread_t th;
+                     (void) pthread_create (&th, &tk->attr,
+                                            timer_sigev_thread, td);
+                   }
+               }
+
+             pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
            }
          else if (si.si_code == SI_TKILL)
            /* The thread is canceled.  */
@@ -125,7 +170,7 @@ __start_helper_thread (void)
   /* Block all signals in the helper thread but SIGSETXID.  To do this
      thoroughly we temporarily have to block all signals here.  The
      helper can lose wakeups if SIGCANCEL is not blocked throughout,
-     but sigfillset omits it SIGSETXID.  So, we add it back 
+     but sigfillset omits SIGSETXID.  So, we add SIGCANCEL back
      explicitly here.  */
   sigset_t ss;
   sigset_t oss;
index 3595523..94c78fc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <errno.h>
 #include <stdlib.h>
-#include "fork.h"
+#include <fork.h>
 #include <atomic.h>
 
 
 void
-__unregister_atfork (void *dso_handle)
+__unregister_atfork (
+     void *dso_handle)
 {
   /* Check whether there is any entry in the list which we have to
      remove.  It is likely that this is not the case so don't bother
@@ -53,7 +54,7 @@ __unregister_atfork (void *dso_handle)
      that there couldn't have been another thread deleting something.
      The __unregister_atfork function is only called from the
      dlclose() code which itself serializes the operations.  */
-  lll_lock (__fork_lock);
+  lll_lock (__fork_lock, LLL_PRIVATE);
 
   /* We have to create a new list with all the entries we don't remove.  */
   struct deleted_handler
@@ -66,10 +67,21 @@ __unregister_atfork (void *dso_handle)
      It's a single linked list so readers are.  */
   do
     {
+    again:
       if (runp->dso_handle == dso_handle)
        {
          if (lastp == NULL)
-           __fork_handlers = runp->next;
+           {
+             /* We have to use an atomic operation here because
+                __linkin_atfork also uses one.  */
+             if (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
+                                                        runp->next, runp)
+                 != 0)
+               {
+                 runp = __fork_handlers;
+                 goto again;
+               }
+           }
          else
            lastp->next = runp->next;
 
@@ -88,7 +100,7 @@ __unregister_atfork (void *dso_handle)
   while (runp != NULL);
 
   /* Release the lock.  */
-  lll_unlock (__fork_lock);
+  lll_unlock (__fork_lock, LLL_PRIVATE);
 
   /* Walk the list of all entries which have to be deleted.  */
   while (deleted != NULL)
@@ -103,7 +115,7 @@ __unregister_atfork (void *dso_handle)
       atomic_decrement (&deleted->handler->refcntr);
       unsigned int val;
       while ((val = deleted->handler->refcntr) != 0)
-       lll_futex_wait (&deleted->handler->refcntr, val);
+       lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE);
 
       deleted = deleted->next;
     }
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
deleted file mode 100644 (file)
index 3b111dd..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
-  GLIBC_2.3.3 {
-    # Changed timer_t.
-    timer_create; timer_delete; timer_getoverrun; timer_gettime;
-    timer_settime;
-  }
-}
index 57edbbb..e973bc5 100644 (file)
@@ -33,9 +33,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 
 typedef union
 {
index 1e461ad..b0d04c7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
 
        .text
 
-#ifndef LOCK
-# ifdef UP
-#  define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+       movl    $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+       movl    $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+       xorl    $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+       xorl    $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+       xorl    $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+       movl    %fs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+       movl    %fs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+       movl    %fs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %fs:PRIVATE_FUTEX, reg
 # else
-#  define LOCK lock
+#  define LOAD_FUTEX_WAIT(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %fs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT, reg
 # endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %fs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+       xorl    $FUTEX_PRIVATE_FLAG, reg ; \
+       andl    %fs:PRIVATE_FUTEX, reg ; \
+       orl     $FUTEX_WAKE, reg
 #endif
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
 
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday    0xffffffffff600000
 
 
-       .globl  __lll_mutex_lock_wait
-       .type   __lll_mutex_lock_wait,@function
-       .hidden __lll_mutex_lock_wait
+       .globl  __lll_lock_wait_private
+       .type   __lll_lock_wait_private,@function
+       .hidden __lll_lock_wait_private
        .align  16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
+       cfi_startproc
        pushq   %r10
+       cfi_adjust_cfa_offset(8)
        pushq   %rdx
-
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%r10, -16)
+       cfi_offset(%rdx, -24)
        xorq    %r10, %r10      /* No timeout.  */
        movl    $2, %edx
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
-#else
-       movl    $FUTEX_WAIT, %esi
-#endif
+       LOAD_PRIVATE_FUTEX_WAIT (%esi)
 
        cmpl    %edx, %eax      /* NB:   %edx == 2 */
        jne     2f
@@ -66,33 +101,144 @@ __lll_mutex_lock_wait:
        jnz     1b
 
        popq    %rdx
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%rdx)
        popq    %r10
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r10)
        retq
-       .size   __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+       cfi_endproc
+       .size   __lll_lock_wait_private,.-__lll_lock_wait_private
 
 #ifdef NOT_IN_libc
-       .globl  __lll_mutex_timedlock_wait
-       .type   __lll_mutex_timedlock_wait,@function
-       .hidden __lll_mutex_timedlock_wait
+       .globl  __lll_lock_wait
+       .type   __lll_lock_wait,@function
+       .hidden __lll_lock_wait
+       .align  16
+__lll_lock_wait:
+       cfi_startproc
+       pushq   %r10
+       cfi_adjust_cfa_offset(8)
+       pushq   %rdx
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%r10, -16)
+       cfi_offset(%rdx, -24)
+       xorq    %r10, %r10      /* No timeout.  */
+       movl    $2, %edx
+       LOAD_FUTEX_WAIT (%esi)
+
+       cmpl    %edx, %eax      /* NB:   %edx == 2 */
+       jne     2f
+
+1:     movl    $SYS_futex, %eax
+       syscall
+
+2:     movl    %edx, %eax
+       xchgl   %eax, (%rdi)    /* NB:   lock is implied */
+
+       testl   %eax, %eax
+       jnz     1b
+
+       popq    %rdx
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%rdx)
+       popq    %r10
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r10)
+       retq
+       cfi_endproc
+       .size   __lll_lock_wait,.-__lll_lock_wait
+
+       /*      %rdi: futex
+               %rsi: flags
+               %rdx: timeout
+               %eax: futex value
+       */
+       .globl  __lll_timedlock_wait
+       .type   __lll_timedlock_wait,@function
+       .hidden __lll_timedlock_wait
        .align  16
-__lll_mutex_timedlock_wait:
+__lll_timedlock_wait:
+       cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+       cmpl    $0, __have_futex_clock_realtime(%rip)
+#  else
+       cmpl    $0, __have_futex_clock_realtime
+#  endif
+       je      .Lreltmo
+# endif
+
+       pushq   %r9
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r9, 0)
+       movq    %rdx, %r10
+       movl    $0xffffffff, %r9d
+       LOAD_FUTEX_WAIT_ABS (%esi)
+
+       movl    $2, %edx
+       cmpl    %edx, %eax
+       jne     2f
+
+1:     movl    $SYS_futex, %eax
+       movl    $2, %edx
+       syscall
+
+2:     xchgl   %edx, (%rdi)    /* NB:   lock is implied */
+
+       testl   %edx, %edx
+       jz      3f
+
+       cmpl    $-ETIMEDOUT, %eax
+       je      4f
+       cmpl    $-EINVAL, %eax
+       jne     1b
+4:     movl    %eax, %edx
+       negl    %edx
+
+3:     movl    %edx, %eax
+       popq    %r9
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r9)
+       retq
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
        /* Check for a valid timeout value.  */
        cmpq    $1000000000, 8(%rdx)
        jae     3f
 
        pushq   %r8
+       cfi_adjust_cfa_offset(8)
        pushq   %r9
+       cfi_adjust_cfa_offset(8)
        pushq   %r12
+       cfi_adjust_cfa_offset(8)
        pushq   %r13
+       cfi_adjust_cfa_offset(8)
        pushq   %r14
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%r8, -16)
+       cfi_offset(%r9, -24)
+       cfi_offset(%r12, -32)
+       cfi_offset(%r13, -40)
+       cfi_offset(%r14, -48)
+       pushq   %rsi
+       cfi_adjust_cfa_offset(8)
 
        /* Stack frame for the timespec and timeval structs.  */
-       subq    $16, %rsp
+       subq    $24, %rsp
+       cfi_adjust_cfa_offset(24)
 
        movq    %rdi, %r12
        movq    %rdx, %r13
 
+       movl    $2, %edx
+       xchgl   %edx, (%r12)
+
+       testl   %edx, %edx
+       je      6f
+
 1:
        /* Get current time.  */
        movq    %rsp, %rdi
@@ -114,118 +260,137 @@ __lll_mutex_timedlock_wait:
        addq    $1000000000, %rsi
        decq    %rdi
 4:     testq   %rdi, %rdi
-       js      5f              /* Time is already up.  */
+       js      2f              /* Time is already up.  */
 
-       /* Futex call.  */
-       movq    %rdi, (%rsp)    /* Store relative timeout.  */
+       /* Store relative timeout.  */
+       movq    %rdi, (%rsp)
        movq    %rsi, 8(%rsp)
 
-       movl    $1, %eax
+       /* Futex call.  */
        movl    $2, %edx
-       LOCK
-       cmpxchgl %edx, (%r12)
-
-       testl   %eax, %eax
-       je      8f
-
+       movl    $1, %eax
        movq    %rsp, %r10
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
-#else
-       movl    $FUTEX_WAIT, %esi
-#endif
+       movl    24(%rsp), %esi
+       LOAD_FUTEX_WAIT (%esi)
        movq    %r12, %rdi
        movl    $SYS_futex, %eax
        syscall
-       movq    %rax, %rcx
 
-8:                             /* NB: %edx == 2 */
-       xorl    %eax, %eax
-       LOCK
-       cmpxchgl %edx, (%rdi)
-       jnz     7f
+       /* NB: %edx == 2 */
+       xchgl   %edx, (%r12)
+
+       testl   %edx, %edx
+       je      6f
+
+       cmpl    $-ETIMEDOUT, %eax
+       jne     1b
+2:     movl    $ETIMEDOUT, %edx
 
-6:     addq    $16, %rsp
+6:     addq    $32, %rsp
+       cfi_adjust_cfa_offset(-32)
        popq    %r14
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r14)
        popq    %r13
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r13)
        popq    %r12
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r12)
        popq    %r9
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r9)
        popq    %r8
-       retq
-
-       /* Check whether the time expired.  */
-7:     cmpq    $-ETIMEDOUT, %rcx
-       je      5f
-
-       /* Make sure the current holder knows we are going to sleep.  */
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r8)
        movl    %edx, %eax
-       xchgl   %eax, (%rdi)
-       testl   %eax, %eax
-       jz      6b
-       jmp     1b
+       retq
 
 3:     movl    $EINVAL, %eax
        retq
-
-5:     movl    $ETIMEDOUT, %eax
-       jmp     6b
-       .size   __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+# endif
+       cfi_endproc
+       .size   __lll_timedlock_wait,.-__lll_timedlock_wait
 #endif
 
 
-#ifdef NOT_IN_libc
-       .globl  lll_unlock_wake_cb
-       .type   lll_unlock_wake_cb,@function
-       .hidden lll_unlock_wake_cb
+       .globl  __lll_unlock_wake_private
+       .type   __lll_unlock_wake_private,@function
+       .hidden __lll_unlock_wake_private
        .align  16
-lll_unlock_wake_cb:
+__lll_unlock_wake_private:
+       cfi_startproc
        pushq   %rsi
+       cfi_adjust_cfa_offset(8)
        pushq   %rdx
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%rsi, -16)
+       cfi_offset(%rdx, -24)
 
-       LOCK
-       addl    $1, (%rdi)
-       jng     1f
+       movl    $0, (%rdi)
+       LOAD_PRIVATE_FUTEX_WAKE (%esi)
+       movl    $1, %edx        /* Wake one thread.  */
+       movl    $SYS_futex, %eax
+       syscall
 
        popq    %rdx
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%rdx)
        popq    %rsi
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%rsi)
        retq
-       .size   lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+       cfi_endproc
+       .size   __lll_unlock_wake_private,.-__lll_unlock_wake_private
 
-       .globl  __lll_mutex_unlock_wake
-       .type   __lll_mutex_unlock_wake,@function
-       .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+       .globl  __lll_unlock_wake
+       .type   __lll_unlock_wake,@function
+       .hidden __lll_unlock_wake
        .align  16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
+       cfi_startproc
        pushq   %rsi
+       cfi_adjust_cfa_offset(8)
        pushq   %rdx
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%rsi, -16)
+       cfi_offset(%rdx, -24)
 
        movl    $0, (%rdi)
-       movl    $FUTEX_WAKE, %esi
+       LOAD_FUTEX_WAKE (%esi)
        movl    $1, %edx        /* Wake one thread.  */
        movl    $SYS_futex, %eax
        syscall
 
        popq    %rdx
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%rdx)
        popq    %rsi
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%rsi)
        retq
-       .size   __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+       cfi_endproc
+       .size   __lll_unlock_wake,.-__lll_unlock_wake
 
-
-#ifdef NOT_IN_libc
        .globl  __lll_timedwait_tid
        .type   __lll_timedwait_tid,@function
        .hidden __lll_timedwait_tid
        .align  16
 __lll_timedwait_tid:
+       cfi_startproc
        pushq   %r12
+       cfi_adjust_cfa_offset(8)
        pushq   %r13
+       cfi_adjust_cfa_offset(8)
+       cfi_offset(%r12, -16)
+       cfi_offset(%r13, -24)
 
        movq    %rdi, %r12
        movq    %rsi, %r13
 
        subq    $16, %rsp
+       cfi_adjust_cfa_offset(16)
 
        /* Get current time.  */
 2:     movq    %rsp, %rdi
@@ -255,6 +420,8 @@ __lll_timedwait_tid:
        jz      4f
 
        movq    %rsp, %r10
+       /* XXX The kernel so far uses global futex for the wakeup at
+          all times.  */
 #if FUTEX_WAIT == 0
        xorl    %esi, %esi
 #else
@@ -269,14 +436,21 @@ __lll_timedwait_tid:
 4:     xorl    %eax, %eax
 
 8:     addq    $16, %rsp
+       cfi_adjust_cfa_offset(-16)
        popq    %r13
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r13)
        popq    %r12
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r12)
        retq
 
+       cfi_adjust_cfa_offset(32)
 1:     cmpq    $-ETIMEDOUT, %rax
        jne     2b
 
 6:     movl    $ETIMEDOUT, %eax
        jmp     8b
+       cfi_endproc
        .size   __lll_timedwait_tid,.-__lll_timedwait_tid
 #endif
index c9f30e9..7c042fc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
 #ifndef _LOWLEVELLOCK_H
 #define _LOWLEVELLOCK_H        1
 
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <atomic.h>
-#include <sysdep.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-#  define LOCK_INSTR   /* nothing */
-# else
-#  define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+#  ifdef UP
+#   define LOCK_INSTR  /* nothing */
+#  else
+#   define LOCK_INSTR "lock;"
+#  endif
+# endif
+#else
+# ifndef LOCK
+#  ifdef UP
+#   define LOCK
+#  else
+#   define LOCK lock
+#  endif
 # endif
 #endif
 
 #define FUTEX_WAIT             0
 #define FUTEX_WAKE             1
+#define FUTEX_CMP_REQUEUE      4
+#define FUTEX_WAKE_OP          5
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_WAIT_REQUEUE_PI  11
+#define FUTEX_CMP_REQUEUE_PI   12
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE  ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+#ifndef __ASSEMBLER__
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)                                            \
+   ? ((private) == 0                                                         \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))           \
+      : (fl))                                                                \
+   : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG);                \
+       __asm__ ("andl %%fs:%P1, %0" : "+r" (__fl)                                    \
+            : "i" (offsetof (struct pthread, header.private_futex)));        \
+       __fl | (fl); }))
+# endif
+#endif
 
-
-/* Initializer for compatibility lock.  */
-#define LLL_MUTEX_LOCK_INITIALIZER             (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED      (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS     (2)
+/* Initializer for lock.  */
+#define LLL_LOCK_INITIALIZER           (0)
+#define LLL_LOCK_INITIALIZER_LOCKED    (1)
+#define LLL_LOCK_INITIALIZER_WAITERS   (2)
 
 /* Delay in spinlock loop.  */
-#define BUSY_WAIT_NOP          __asm__ ("rep; nop")
-
-
-#define lll_futex_wait(futex, val) \
-  do {                                                                       \
-    int __ignore;                                                            \
+#define BUSY_WAIT_NOP    __asm__ ("rep; nop")
+
+
+#define LLL_STUB_UNWIND_INFO_START \
+       ".section       .eh_frame,\"a\",@progbits\n"            \
+"7:\t" ".long  9f-8f   # Length of Common Information Entry\n" \
+"8:\t" ".long  0x0     # CIE Identifier Tag\n\t"               \
+       ".byte  0x1     # CIE Version\n\t"                      \
+       ".ascii \"zR\\0\"       # CIE Augmentation\n\t"         \
+       ".uleb128 0x1   # CIE Code Alignment Factor\n\t"        \
+       ".sleb128 -8    # CIE Data Alignment Factor\n\t"        \
+       ".byte  0x10    # CIE RA Column\n\t"                    \
+       ".uleb128 0x1   # Augmentation size\n\t"                \
+       ".byte  0x1b    # FDE Encoding (pcrel sdata4)\n\t"      \
+       ".byte  0x12    # DW_CFA_def_cfa_sf\n\t"                \
+       ".uleb128 0x7\n\t"                                      \
+       ".sleb128 16\n\t"                                       \
+       ".align 8\n"                                            \
+"9:\t" ".long  23f-10f # FDE Length\n"                         \
+"10:\t"        ".long  10b-7b  # FDE CIE offset\n\t"                   \
+       ".long  1b-.    # FDE initial location\n\t"             \
+       ".long  6b-1b   # FDE address range\n\t"                \
+       ".uleb128 0x0   # Augmentation size\n\t"                \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x10\n\t"                                     \
+       ".uleb128 12f-11f\n"                                    \
+"11:\t"        ".byte  0x80    # DW_OP_breg16\n\t"                     \
+       ".sleb128 4b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x10\n\t"                                     \
+       ".uleb128 14f-13f\n"                                    \
+"13:\t"        ".byte  0x80    # DW_OP_breg16\n\t"                     \
+       ".sleb128 4b-2b\n"                                      \
+"14:\t"        ".byte  0x40 + (3b-2b) # DW_CFA_advance_loc\n\t"        \
+       ".byte  0x0e    # DW_CFA_def_cfa_offset\n\t"            \
+       ".uleb128 0\n\t"                                        \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x10\n\t"                                     \
+       ".uleb128 16f-15f\n"                                    \
+"15:\t"        ".byte  0x80    # DW_OP_breg16\n\t"                     \
+       ".sleb128 4b-3b\n"                                      \
+"16:\t"        ".byte  0x40 + (4b-3b-1) # DW_CFA_advance_loc\n\t"      \
+       ".byte  0x0e    # DW_CFA_def_cfa_offset\n\t"            \
+       ".uleb128 128\n\t"                                      \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x10\n\t"                                     \
+       ".uleb128 20f-17f\n"                                    \
+"17:\t"        ".byte  0x80    # DW_OP_breg16\n\t"                     \
+       ".sleb128 19f-18f\n\t"                                  \
+       ".byte  0x0d    # DW_OP_const4s\n"                      \
+"18:\t"        ".4byte 4b-.\n\t"                                       \
+       ".byte  0x1c    # DW_OP_minus\n\t"                      \
+       ".byte  0x0d    # DW_OP_const4s\n"                      \
+"19:\t"        ".4byte 24f-.\n\t"                                      \
+       ".byte  0x22    # DW_OP_plus\n"                         \
+"20:\t"        ".byte  0x40 + (5b-4b+1) # DW_CFA_advance_loc\n\t"      \
+       ".byte  0x13    # DW_CFA_def_cfa_offset_sf\n\t"         \
+       ".sleb128 16\n\t"                                       \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x10\n\t"                                     \
+       ".uleb128 22f-21f\n"                                    \
+"21:\t"        ".byte  0x80    # DW_OP_breg16\n\t"                     \
+       ".sleb128 4b-5b\n"                                      \
+"22:\t"        ".align 8\n"                                            \
+"23:\t"        ".previous\n"
+
+/* Unwind info for
+   1: leaq ..., %rdi
+   2: subq $128, %rsp
+   3: callq ...
+   4: addq $128, %rsp
+   5: jmp 24f
+   6:
+   snippet.  */
+#define LLL_STUB_UNWIND_INFO_5 \
+LLL_STUB_UNWIND_INFO_START                                     \
+"12:\t"        ".byte  0x40 + (2b-1b) # DW_CFA_advance_loc\n\t"        \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+   1: leaq ..., %rdi
+   0: movq ..., %rdx
+   2: subq $128, %rsp
+   3: callq ...
+   4: addq $128, %rsp
+   5: jmp 24f
+   6:
+   snippet.  */
+#define LLL_STUB_UNWIND_INFO_6 \
+LLL_STUB_UNWIND_INFO_START                                     \
+"12:\t"        ".byte  0x40 + (0b-1b) # DW_CFA_advance_loc\n\t"        \
+       ".byte  0x16    # DW_CFA_val_expression\n\t"            \
+       ".uleb128 0x10\n\t"                                     \
+       ".uleb128 26f-25f\n"                                    \
+"25:\t"        ".byte  0x80    # DW_OP_breg16\n\t"                     \
+       ".sleb128 4b-0b\n"                                      \
+"26:\t"        ".byte  0x40 + (2b-0b) # DW_CFA_advance_loc\n\t"        \
+LLL_STUB_UNWIND_INFO_END
+
+
+#define lll_futex_wait(futex, val, private) \
+  lll_futex_timed_wait(futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+  ({                                                                         \
+    register const struct timespec *__to __asm__ ("r10") = timeout;          \
+    int __status;                                                            \
     register __typeof (val) _val __asm__ ("edx") = (val);                            \
-    __asm__ __volatile ("xorq %%r10, %%r10\n\t"                                      \
-                     "syscall"                                               \
-                     : "=a" (__ignore)                                       \
-                     : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAIT),       \
-                       "d" (_val)                                            \
-                     : "memory", "cc", "r10", "r11", "cx");                  \
-  } while (0)
+    __asm__ __volatile ("syscall"                                                    \
+                     : "=a" (__status)                                       \
+                     : "0" (SYS_futex), "D" (futex),                         \
+                       "S" (__lll_private_flag (FUTEX_WAIT, private)),       \
+                       "d" (_val), "r" (__to)                                \
+                     : "memory", "cc", "r11", "cx");                         \
+    __status;                                                                \
+  })
 
 
-#define lll_futex_wake(futex, nr) \
+#define lll_futex_wake(futex, nr, private) \
   do {                                                                       \
     int __ignore;                                                            \
     register __typeof (nr) _nr __asm__ ("edx") = (nr);                       \
     __asm__ __volatile ("syscall"                                                    \
                      : "=a" (__ignore)                                       \
-                     : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAKE),       \
+                     : "0" (SYS_futex), "D" (futex),                         \
+                       "S" (__lll_private_flag (FUTEX_WAKE, private)),       \
                        "d" (_nr)                                             \
                      : "memory", "cc", "r10", "r11", "cx");                  \
   } while (0)
 
 
-/* Does not preserve %eax and %ecx.  */
-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
-/* Does not preserver %eax, %ecx, and %edx.  */
-extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
-                                      const struct timespec *__abstime)
-     attribute_hidden;
-/* Preserves all registers but %eax.  */
-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
    value is zero.  In case the operation failed, the cmpxchg instruction
    has loaded the current value of the memory work which is guaranteed
    to be nonzero.  */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t"      \
+                          "je 0f\n\t"                                        \
+                          "lock; cmpxchgl %2, %1\n\t"                        \
+                          "jmp 1f\n\t"                                       \
+                          "0:\tcmpxchgl %2, %1\n\t"                          \
+                          "1:"
+#endif
+
+#define lll_trylock(futex) \
   ({ int ret;                                                                \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"                        \
+     __asm__ __volatile (__lll_trylock_asm                                   \
                       : "=a" (ret), "=m" (futex)                             \
-                      : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-                        "0" (LLL_MUTEX_LOCK_INITIALIZER)                     \
+                      : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
+                        "0" (LLL_LOCK_INITIALIZER)                           \
                       : "memory");                                           \
      ret; })
 
-
-#define lll_mutex_cond_trylock(futex) \
+#define lll_robust_trylock(futex, id) \
   ({ int ret;                                                                \
      __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"                        \
                       : "=a" (ret), "=m" (futex)                             \
-                      : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS),            \
-                        "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER)        \
+                      : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER)    \
                       : "memory");                                           \
      ret; })
 
+#define lll_cond_trylock(futex) \
+  ({ int ret;                                                                \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"                        \
+                      : "=a" (ret), "=m" (futex)                             \
+                      : "r" (LLL_LOCK_INITIALIZER_WAITERS),                  \
+                        "m" (futex), "0" (LLL_LOCK_INITIALIZER)              \
+                      : "memory");                                           \
+     ret; })
 
-#define lll_mutex_lock(futex) \
-  (void) ({ int ignore1, ignore2, ignore3;                                   \
-           __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"                      \
-                             "jnz 1f\n\t"                                    \
-                             ".subsection 1\n"                               \
-                             "1:\tleaq %2, %%rdi\n\t"                        \
-                             "subq $128, %%rsp\n\t"                          \
-                             "callq __lll_mutex_lock_wait\n\t"               \
-                             "addq $128, %%rsp\n\t"                          \
-                             "jmp 2f\n\t"                                    \
-                             ".previous\n"                                   \
-                             "2:"                                            \
-                             : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
-                               "=a" (ignore3)                                \
-                             : "0" (1), "m" (futex), "3" (0)                 \
-                             : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_cond_lock(futex) \
-  (void) ({ int ignore1, ignore2, ignore3;                                   \
-           __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"                      \
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t"               \
+                             "jnz 1f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
+                             "je 0f\n\t"                                     \
+                             "lock; cmpxchgl %4, %2\n\t"                     \
                              "jnz 1f\n\t"                                    \
-                             ".subsection 1\n"                               \
-                             "1:\tleaq %2, %%rdi\n\t"                        \
-                             "subq $128, %%rsp\n\t"                          \
-                             "callq __lll_mutex_lock_wait\n\t"               \
-                             "addq $128, %%rsp\n\t"                          \
-                             "jmp 2f\n\t"                                    \
-                             ".previous\n"                                   \
-                             "2:"                                            \
-                             : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
-                               "=a" (ignore3)                                \
-                             : "0" (2), "m" (futex), "3" (0)                 \
-                             : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_timedlock(futex, timeout) \
-  ({ int _result, ignore1, ignore2, ignore3;                                 \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t"                            \
+                             "jmp 24f\n"                                     \
+                             "0:\tcmpxchgl %4, %2\n\t"                       \
+                             "jnz 1f\n\t"
+#endif
+
+#define lll_lock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore1, ignore2, ignore3;                                        \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+        __asm__ __volatile (__lll_lock_asm_start                                     \
+                          ".subsection 1\n\t"                                \
+                          ".type _L_lock_%=, @function\n"                    \
+                          "_L_lock_%=:\n"                                    \
+                          "1:\tleaq %2, %%rdi\n"                             \
+                          "2:\tsubq $128, %%rsp\n"                           \
+                          "3:\tcallq __lll_lock_wait_private\n"              \
+                          "4:\taddq $128, %%rsp\n"                           \
+                          "5:\tjmp 24f\n"                                    \
+                          "6:\t.size _L_lock_%=, 6b-1b\n\t"                  \
+                          ".previous\n"                                      \
+                          LLL_STUB_UNWIND_INFO_5                             \
+                          "24:"                                              \
+                          : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),   \
+                            "=a" (ignore3)                                   \
+                          : "0" (1), "m" (futex), "3" (0)                    \
+                          : "cx", "r11", "cc", "memory");                    \
+       else                                                                  \
+        __asm__ __volatile (__lll_lock_asm_start                                     \
+                          ".subsection 1\n\t"                                \
+                          ".type _L_lock_%=, @function\n"                    \
+                          "_L_lock_%=:\n"                                    \
+                          "1:\tleaq %2, %%rdi\n"                             \
+                          "2:\tsubq $128, %%rsp\n"                           \
+                          "3:\tcallq __lll_lock_wait\n"                      \
+                          "4:\taddq $128, %%rsp\n"                           \
+                          "5:\tjmp 24f\n"                                    \
+                          "6:\t.size _L_lock_%=, 6b-1b\n\t"                  \
+                          ".previous\n"                                      \
+                          LLL_STUB_UNWIND_INFO_5                             \
+                          "24:"                                              \
+                          : "=S" (ignore1), "=D" (ignore2), "=m" (futex),    \
+                            "=a" (ignore3)                                   \
+                          : "1" (1), "m" (futex), "3" (0), "0" (private)     \
+                          : "cx", "r11", "cc", "memory");                    \
+    })                                                                       \
+
+#define lll_robust_lock(futex, id, private) \
+  ({ int result, ignore1, ignore2;                                           \
+    __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"                             \
+                     "jnz 1f\n\t"                                            \
+                     ".subsection 1\n\t"                                     \
+                     ".type _L_robust_lock_%=, @function\n"                  \
+                     "_L_robust_lock_%=:\n"                                  \
+                     "1:\tleaq %2, %%rdi\n"                                  \
+                     "2:\tsubq $128, %%rsp\n"                                \
+                     "3:\tcallq __lll_robust_lock_wait\n"                    \
+                     "4:\taddq $128, %%rsp\n"                                \
+                     "5:\tjmp 24f\n"                                         \
+                     "6:\t.size _L_robust_lock_%=, 6b-1b\n\t"                \
+                     ".previous\n"                                           \
+                     LLL_STUB_UNWIND_INFO_5                                  \
+                     "24:"                                                   \
+                     : "=S" (ignore1), "=D" (ignore2), "=m" (futex),         \
+                       "=a" (result)                                         \
+                     : "1" (id), "m" (futex), "3" (0), "0" (private)         \
+                     : "cx", "r11", "cc", "memory");                         \
+    result; })
+
+#define lll_cond_lock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore1, ignore2, ignore3;                                        \
+       __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"                  \
+                        "jnz 1f\n\t"                                         \
+                        ".subsection 1\n\t"                                  \
+                        ".type _L_cond_lock_%=, @function\n"                 \
+                        "_L_cond_lock_%=:\n"                                 \
+                        "1:\tleaq %2, %%rdi\n"                               \
+                        "2:\tsubq $128, %%rsp\n"                             \
+                        "3:\tcallq __lll_lock_wait\n"                        \
+                        "4:\taddq $128, %%rsp\n"                             \
+                        "5:\tjmp 24f\n"                                      \
+                        "6:\t.size _L_cond_lock_%=, 6b-1b\n\t"               \
+                        ".previous\n"                                        \
+                        LLL_STUB_UNWIND_INFO_5                               \
+                        "24:"                                                \
+                        : "=S" (ignore1), "=D" (ignore2), "=m" (futex),      \
+                          "=a" (ignore3)                                     \
+                        : "1" (2), "m" (futex), "3" (0), "0" (private)       \
+                        : "cx", "r11", "cc", "memory");                      \
+    })
+
+#define lll_robust_cond_lock(futex, id, private) \
+  ({ int result, ignore1, ignore2;                                           \
+    __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"                             \
+                     "jnz 1f\n\t"                                            \
+                     ".subsection 1\n\t"                                     \
+                     ".type _L_robust_cond_lock_%=, @function\n"             \
+                     "_L_robust_cond_lock_%=:\n"                             \
+                     "1:\tleaq %2, %%rdi\n"                                  \
+                     "2:\tsubq $128, %%rsp\n"                                \
+                     "3:\tcallq __lll_robust_lock_wait\n"                    \
+                     "4:\taddq $128, %%rsp\n"                                \
+                     "5:\tjmp 24f\n"                                         \
+                     "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t"           \
+                     ".previous\n"                                           \
+                     LLL_STUB_UNWIND_INFO_5                                  \
+                     "24:"                                                   \
+                     : "=S" (ignore1), "=D" (ignore2), "=m" (futex),         \
+                       "=a" (result)                                         \
+                     : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0),       \
+                       "0" (private)                                         \
+                     : "cx", "r11", "cc", "memory");                         \
+    result; })
+
+#define lll_timedlock(futex, timeout, private) \
+  ({ int result, ignore1, ignore2, ignore3;                                  \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"                            \
                       "jnz 1f\n\t"                                           \
-                      ".subsection 1\n"                                      \
-                      "1:\tleaq %4, %%rdi\n\t"                               \
-                      "movq %8, %%rdx\n\t"                                   \
-                      "subq $128, %%rsp\n\t"                                 \
-                      "callq __lll_mutex_timedlock_wait\n\t"                 \
-                      "addq $128, %%rsp\n\t"                                 \
-                      "jmp 2f\n\t"                                           \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_timedlock_%=, @function\n"                   \
+                      "_L_timedlock_%=:\n"                                   \
+                      "1:\tleaq %4, %%rdi\n"                                 \
+                      "0:\tmovq %8, %%rdx\n"                                 \
+                      "2:\tsubq $128, %%rsp\n"                               \
+                      "3:\tcallq __lll_timedlock_wait\n"                     \
+                      "4:\taddq $128, %%rsp\n"                               \
+                      "5:\tjmp 24f\n"                                        \
+                      "6:\t.size _L_timedlock_%=, 6b-1b\n\t"                 \
                       ".previous\n"                                          \
-                      "2:"                                                   \
-                      : "=a" (_result), "=&D" (ignore1), "=S" (ignore2),      \
+                      LLL_STUB_UNWIND_INFO_6                                 \
+                      "24:"                                                  \
+                      : "=a" (result), "=D" (ignore1), "=S" (ignore2),       \
                         "=&d" (ignore3), "=m" (futex)                        \
-                      : "0" (0), "2" (1), "m" (futex), "m" (timeout)         \
+                      : "0" (0), "1" (1), "m" (futex), "m" (timeout),        \
+                        "2" (private)                                        \
                       : "memory", "cx", "cc", "r10", "r11");                 \
-     _result; })
-
-
-#define lll_mutex_unlock(futex) \
-  (void) ({ int ignore;                                                              \
-            __asm__ __volatile (LOCK_INSTR "decl %0\n\t"                             \
-                             "jne 1f\n\t"                                    \
-                             ".subsection 1\n"                               \
-                             "1:\tleaq %0, %%rdi\n\t"                        \
-                             "subq $128, %%rsp\n\t"                          \
-                             "callq __lll_mutex_unlock_wake\n\t"             \
-                             "addq $128, %%rsp\n\t"                          \
-                             "jmp 2f\n\t"                                    \
-                             ".previous\n"                                   \
-                             "2:"                                            \
-                             : "=m" (futex), "=&D" (ignore)                  \
-                             : "m" (futex)                                   \
-                             : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_islocked(futex) \
-  (futex != LLL_MUTEX_LOCK_INITIALIZER)
-
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
-/* Initializers for lock.  */
-#define LLL_LOCK_INITIALIZER           (0)
-#define LLL_LOCK_INITIALIZER_LOCKED    (1)
-
-
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-    2  -  taken by more users */
+     result; })
 
+#define lll_robust_timedlock(futex, timeout, id, private) \
+  ({ int result, ignore1, ignore2, ignore3;                                  \
+     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"                            \
+                      "jnz 1f\n\t"                                           \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_robust_timedlock_%=, @function\n"            \
+                      "_L_robust_timedlock_%=:\n"                            \
+                      "1:\tleaq %4, %%rdi\n"                                 \
+                      "0:\tmovq %8, %%rdx\n"                                 \
+                      "2:\tsubq $128, %%rsp\n"                               \
+                      "3:\tcallq __lll_robust_timedlock_wait\n"              \
+                      "4:\taddq $128, %%rsp\n"                               \
+                      "5:\tjmp 24f\n"                                        \
+                      "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t"          \
+                      ".previous\n"                                          \
+                      LLL_STUB_UNWIND_INFO_6                                 \
+                      "24:"                                                  \
+                      : "=a" (result), "=D" (ignore1), "=S" (ignore2),       \
+                        "=&d" (ignore3), "=m" (futex)                        \
+                      : "0" (0), "1" (id), "m" (futex), "m" (timeout),       \
+                        "2" (private)                                        \
+                      : "memory", "cx", "cc", "r10", "r11");                 \
+     result; })
 
 #if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t"                     \
+                               "jne 1f\n\t"
 #else
-/* Special versions of the macros for use in libc itself.  They avoid
-   the lock prefix when the thread library is not used.
-
-   The code sequence to avoid unnecessary lock prefixes is what the AMD
-   guys suggested.  If you do not like it, bring it up with AMD.
-
-   XXX In future we might even want to avoid it on UP machines.  */
-
-# define lll_trylock(futex) \
-  ({ unsigned char ret;                                                              \
-     __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t"       \
-                      "je 0f\n\t"                                            \
-                      "lock; cmpxchgl %2, %1\n\t"                            \
-                      "jmp 1f\n"                                             \
-                      "0:\tcmpxchgl %2, %1\n\t"                              \
-                      "1:setne %0"                                           \
-                      : "=a" (ret), "=m" (futex)                             \
-                      : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-                        "0" (LLL_MUTEX_LOCK_INITIALIZER)                     \
-                      : "memory");                                           \
-     ret; })
-
-
-# define lll_lock(futex) \
-  (void) ({ int ignore1, ignore2, ignore3;                                   \
-           __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
-                             "je 0f\n\t"                                     \
-                             "lock; cmpxchgl %0, %2\n\t"                     \
-                             "jnz 1f\n\t"                                    \
-                             "jmp 2f\n"                                      \
-                             "0:\tcmpxchgl %0, %2\n\t"                       \
-                             "jnz 1f\n\t"                                    \
-                             ".subsection 1\n"                               \
-                             "1:\tleaq %2, %%rdi\n\t"                        \
-                             "subq $128, %%rsp\n\t"                          \
-                             "callq __lll_mutex_lock_wait\n\t"               \
-                             "addq $128, %%rsp\n\t"                          \
-                             "jmp 2f\n\t"                                    \
-                             ".previous\n"                                   \
-                             "2:"                                            \
-                             : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
-                               "=a" (ignore3)                                \
-                             : "0" (1), "m" (futex), "3" (0)                 \
-                             : "cx", "r11", "cc", "memory"); })
-
-
-# define lll_unlock(futex) \
-  (void) ({ int ignore;                                                              \
-            __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
-                             "je 0f\n\t"                                     \
-                             "lock; decl %0\n\t"                             \
-                             "jne 1f\n\t"                                    \
-                             "jmp 2f\n"                                      \
-                             "0:\tdecl %0\n\t"                               \
-                             "jne 1f\n\t"                                    \
-                             ".subsection 1\n"                               \
-                             "1:\tleaq %0, %%rdi\n\t"                        \
-                             "subq $128, %%rsp\n\t"                          \
-                             "callq __lll_mutex_unlock_wake\n\t"             \
-                             "addq $128, %%rsp\n\t"                          \
-                             "jmp 2f\n\t"                                    \
-                             ".previous\n"                                   \
-                             "2:"                                            \
-                             : "=m" (futex), "=&D" (ignore)                  \
-                             : "m" (futex)                                   \
-                             : "ax", "cx", "r11", "cc", "memory"); })
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+                               "je 0f\n\t"                                   \
+                               "lock; decl %0\n\t"                           \
+                               "jne 1f\n\t"                                  \
+                               "jmp 24f\n\t"                                 \
+                               "0:\tdecl %0\n\t"                             \
+                               "jne 1f\n\t"
 #endif
 
+#define lll_unlock(futex, private) \
+  (void)                                                                     \
+    ({ int ignore;                                                           \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)       \
+        __asm__ __volatile (__lll_unlock_asm_start                           \
+                          ".subsection 1\n\t"                                \
+                          ".type _L_unlock_%=, @function\n"                  \
+                          "_L_unlock_%=:\n"                                  \
+                          "1:\tleaq %0, %%rdi\n"                             \
+                          "2:\tsubq $128, %%rsp\n"                           \
+                          "3:\tcallq __lll_unlock_wake_private\n"            \
+                          "4:\taddq $128, %%rsp\n"                           \
+                          "5:\tjmp 24f\n"                                    \
+                          "6:\t.size _L_unlock_%=, 6b-1b\n\t"                \
+                          ".previous\n"                                      \
+                          LLL_STUB_UNWIND_INFO_5                             \
+                          "24:"                                              \
+                          : "=m" (futex), "=&D" (ignore)                     \
+                          : "m" (futex)                                      \
+                          : "ax", "cx", "r11", "cc", "memory");              \
+       else                                                                  \
+        __asm__ __volatile (__lll_unlock_asm_start                           \
+                          ".subsection 1\n\t"                                \
+                          ".type _L_unlock_%=, @function\n"                  \
+                          "_L_unlock_%=:\n"                                  \
+                          "1:\tleaq %0, %%rdi\n"                             \
+                          "2:\tsubq $128, %%rsp\n"                           \
+                          "3:\tcallq __lll_unlock_wake\n"                    \
+                          "4:\taddq $128, %%rsp\n"                           \
+                          "5:\tjmp 24f\n"                                    \
+                          "6:\t.size _L_unlock_%=, 6b-1b\n\t"                \
+                          ".previous\n"                                      \
+                          LLL_STUB_UNWIND_INFO_5                             \
+                          "24:"                                              \
+                          : "=m" (futex), "=&D" (ignore)                     \
+                          : "m" (futex), "S" (private)                       \
+                          : "ax", "cx", "r11", "cc", "memory");              \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  do                                                                         \
+    {                                                                        \
+      int ignore;                                                            \
+      __asm__ __volatile (LOCK_INSTR "andl %2, %0\n\t"                       \
+                       "jne 1f\n\t"                                          \
+                       ".subsection 1\n\t"                                   \
+                       ".type _L_robust_unlock_%=, @function\n"              \
+                       "_L_robust_unlock_%=:\n"                              \
+                       "1:\tleaq %0, %%rdi\n"                                \
+                       "2:\tsubq $128, %%rsp\n"                              \
+                       "3:\tcallq __lll_unlock_wake\n"                       \
+                       "4:\taddq $128, %%rsp\n"                              \
+                       "5:\tjmp 24f\n"                                       \
+                       "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t"            \
+                       ".previous\n"                                         \
+                       LLL_STUB_UNWIND_INFO_5                                \
+                       "24:"                                                 \
+                       : "=m" (futex), "=&D" (ignore)                        \
+                       : "i" (FUTEX_WAITERS), "m" (futex),                   \
+                         "S" (private)                                       \
+                       : "ax", "cx", "r11", "cc", "memory");                 \
+    }                                                                        \
+  while (0)
+
+#define lll_robust_dead(futex, private) \
+  do                                                                         \
+    {                                                                        \
+      int ignore;                                                            \
+      __asm__ __volatile (LOCK_INSTR "orl %3, (%2)\n\t"                              \
+                       "syscall"                                             \
+                       : "=m" (futex), "=a" (ignore)                         \
+                       : "D" (&(futex)), "i" (FUTEX_OWNER_DIED),             \
+                         "S" (__lll_private_flag (FUTEX_WAKE, private)),     \
+                         "1" (__NR_futex), "d" (1)                           \
+                       : "cx", "r11", "cc", "memory");                       \
+    }                                                                        \
+  while (0)
+
+/* Returns non-zero if error happened, zero if success.  */
+#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val, private) \
+  ({ int __res;                                                                      \
+     register int __nr_move __asm__ ("r10") = nr_move;                       \
+     register void *__mutex __asm__ ("r8") = mutex;                          \
+     register int __val __asm__ ("r9") = val;                                \
+     __asm__ __volatile ("syscall"                                           \
+                      : "=a" (__res)                                         \
+                      : "0" (__NR_futex), "D" ((void *) ftx),                \
+                        "S" (__lll_private_flag (FUTEX_CMP_REQUEUE,          \
+                                                 private)), "d" (nr_wake),   \
+                        "r" (__nr_move), "r" (__mutex), "r" (__val)          \
+                      : "cx", "r11", "cc", "memory");                        \
+     __res < 0; })
 
 #define lll_islocked(futex) \
-  (futex != LLL_MUTEX_LOCK_INITIALIZER)
+  (futex != LLL_LOCK_INITIALIZER)
 
 
 /* The kernel notifies a process with uses CLONE_CLEARTID via futex
@@ -318,25 +593,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
       }                                                                              \
     __result; })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-                                const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
+#endif  /* !__ASSEMBLER__ */
 
 #endif /* lowlevellock.h */
index c20ef73..df49496 100644 (file)
@@ -16,6 +16,8 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <tcb-offsets.h>
+
 #define SAVE_PID \
        movl    %fs:PID, %esi;                                                \
        movl    %esi, %edx;                                                   \
index f6e15a2..15ad534 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelbarrier.h>
 
-#define FUTEX_WAIT     0
-#define FUTEX_WAKE     1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
 
        .text
 
@@ -64,9 +56,10 @@ pthread_barrier_wait:
           if the CURR_EVENT memory has meanwhile been changed.  */
 7:
 #if FUTEX_WAIT == 0
-       xorl    %esi, %esi
+       movl    PRIVATE(%rdi), %esi
 #else
        movl    $FUTEX_WAIT, %esi
+       orl     PRIVATE(%rdi), %esi
 #endif
        xorq    %r10, %r10
 8:     movl    $SYS_futex, %eax
@@ -115,6 +108,7 @@ pthread_barrier_wait:
           so 0x7fffffff is the highest value.  */
        movl    $0x7fffffff, %edx
        movl    $FUTEX_WAKE, %esi
+       orl     PRIVATE(%rdi), %esi
        movl    $SYS_futex, %eax
        syscall
 
@@ -139,21 +133,29 @@ pthread_barrier_wait:
 
        retq
 
-1:     addq    $MUTEX, %rdi
-       callq   __lll_mutex_lock_wait
+1:     movl    PRIVATE(%rdi), %esi
+       addq    $MUTEX, %rdi
+       xorl    $LLL_SHARED, %esi
+       callq   __lll_lock_wait
        subq    $MUTEX, %rdi
        jmp     2b
 
-4:     addq    $MUTEX, %rdi
-       callq   __lll_mutex_unlock_wake
+4:     movl    PRIVATE(%rdi), %esi
+       addq    $MUTEX, %rdi
+       xorl    $LLL_SHARED, %esi
+       callq   __lll_unlock_wake
        jmp     5b
 
-6:     addq    $MUTEX, %rdi
-       callq   __lll_mutex_unlock_wake
+6:     movl    PRIVATE(%rdi), %esi
+       addq    $MUTEX, %rdi
+       xorl    $LLL_SHARED, %esi
+       callq   __lll_unlock_wake
        subq    $MUTEX, %rdi
        jmp     7b
 
-9:     addq    $MUTEX, %rdi
-       callq   __lll_mutex_unlock_wake
+9:     movl    PRIVATE(%rdi), %esi
+       addq    $MUTEX, %rdi
+       xorl    $LLL_SHARED, %esi
+       callq   __lll_unlock_wake
        jmp     10b
        .size   pthread_barrier_wait,.-pthread_barrier_wait
index d8ebdfa..0f8037b 100644 (file)
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-#define FUTEX_REQUEUE          3
-#define FUTEX_CMP_REQUEUE      4
-
-#define EINVAL                 22
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
 
 
        .text
@@ -78,8 +69,23 @@ __pthread_cond_broadcast:
 8:     cmpq    $-1, %r8
        je      9f
 
+       /* Do not use requeue for pshared condvars.  */
+       testl   $PS_BIT, MUTEX_KIND(%r8)
+       jne     9f
+
+       /* Requeue to a PI mutex if the PI bit is set.  */
+       movl    MUTEX_KIND(%r8), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       je      81f
+
        /* Wake up all threads.  */
-       movl    $FUTEX_CMP_REQUEUE, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi
+#else
+       movl    %fs:PRIVATE_FUTEX, %esi
+       orl     $FUTEX_CMP_REQUEUE, %esi
+#endif
        movl    $SYS_futex, %eax
        movl    $1, %edx
        movl    $0x7fffffff, %r10d
@@ -94,6 +100,20 @@ __pthread_cond_broadcast:
 10:    xorl    %eax, %eax
        retq
 
+       /* Wake up all threads.  */
+81:    movl    $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+       movl    $SYS_futex, %eax
+       movl    $1, %edx
+       movl    $0x7fffffff, %r10d
+       syscall
+
+       /* For any kind of error, which mainly is EAGAIN, we try again
+          with WAKE.  The general test also covers running on old
+          kernels.  */
+       cmpq    $-4095, %rax
+       jb      10b
+       jmp     9f
+
        .align  16
        /* Unlock.  */
 4:     LOCK
@@ -108,7 +128,11 @@ __pthread_cond_broadcast:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
 #if cond_lock != 0
        subq    $cond_lock, %rdi
 #endif
@@ -116,21 +140,38 @@ __pthread_cond_broadcast:
 
        /* Unlock in loop requires wakeup.  */
 5:     addq    $cond_lock-cond_futex, %rdi
-       callq   __lll_mutex_unlock_wake
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
        jmp     6b
 
        /* Unlock in loop requires wakeup.  */
 7:     addq    $cond_lock-cond_futex, %rdi
-       callq   __lll_mutex_unlock_wake
+       cmpq    $-1, %r8
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
        subq    $cond_lock-cond_futex, %rdi
        jmp     8b
 
 9:     /* The futex requeue functionality is not available.  */
+       cmpq    $-1, %r8
        movl    $0x7fffffff, %edx
-       movl    $FUTEX_WAKE, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
+#else
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
+#endif
        movl    $SYS_futex, %eax
        syscall
        jmp     10b
        .size   __pthread_cond_broadcast, .-__pthread_cond_broadcast
 weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
-
index c7cc3dd..568c984 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
 #include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-#define FUTEX_REQUEUE          3
-
-#define EINVAL                 22
+#include <pthread-errnos.h>
 
 
        .text
@@ -64,9 +55,66 @@ __pthread_cond_signal:
        addl    $1, (%rdi)
 
        /* Wake up one thread.  */
-       movl    $FUTEX_WAKE, %esi
-       movl    $SYS_futex, %eax
+       cmpq    $-1, dep_mutex(%r8)
+       movl    $FUTEX_WAKE_OP, %esi
        movl    $1, %edx
+       movl    $SYS_futex, %eax
+       je      8f
+
+       /* Get the address of the mutex used.  */
+       movq    dep_mutex(%r8), %rcx
+       movl    MUTEX_KIND(%rcx), %r11d
+       andl    $(ROBUST_BIT|PI_BIT), %r11d
+       cmpl    $PI_BIT, %r11d
+       je      9f
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi
+#else
+       orl     %fs:PRIVATE_FUTEX, %esi
+#endif
+
+8:     movl    $1, %r10d
+#if cond_lock != 0
+       addq    $cond_lock, %r8
+#endif
+       movl    $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d
+       syscall
+#if cond_lock != 0
+       subq    $cond_lock, %r8
+#endif
+       /* For any kind of error, we try again with WAKE.
+          The general test also covers running on old kernels.  */
+       cmpq    $-4095, %rax
+       jae     7f
+
+       xorl    %eax, %eax
+       retq
+
+       /* Wake up one thread and requeue none in the PI Mutex case.  */
+9:     movl    $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+       movq    %rcx, %r8
+       xorq    %r10, %r10
+       movl    (%rdi), %r9d    // XXX Can this be right?
+       syscall
+
+       leaq    -cond_futex(%rdi), %r8
+
+       /* For any kind of error, we try again with WAKE.
+          The general test also covers running on old kernels.  */
+       cmpq    $-4095, %rax
+       jb      4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       andl    $FUTEX_PRIVATE_FLAG, %esi
+#else
+       andl    %fs:PRIVATE_FUTEX, %esi
+#endif
+       orl     $FUTEX_WAKE, %esi
+       movl    $SYS_futex, %eax
+       /* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
+       movl    $1, %edx  */
        syscall
 
        /* Unlock.  */
@@ -86,7 +134,11 @@ __pthread_cond_signal:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
 #if cond_lock != 0
        subq    $cond_lock, %rdi
 #endif
@@ -95,7 +147,14 @@ __pthread_cond_signal:
        /* Unlock in loop requires wakeup.  */
 5:
        movq    %r8, %rdi
-       callq   __lll_mutex_unlock_wake
+#if cond_lock != 0
+       addq    $cond_lock, %rdi
+#endif
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
        jmp     6b
        .size   __pthread_cond_signal, .-__pthread_cond_signal
 weak_alias(__pthread_cond_signal, pthread_cond_signal)
index f0dcdb7..427a723 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
 
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#include <bits/kernel-features.h>
 
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday    0xffffffffff600000
@@ -37,6 +31,7 @@
 
        .text
 
+
 /* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
                               const struct timespec *abstime)  */
        .globl  __pthread_cond_timedwait
        .align  16
 __pthread_cond_timedwait:
 .LSTARTCODE:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
        pushq   %r12
-.Lpush_r12:
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r12, 0)
        pushq   %r13
-.Lpush_r13:
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r13, 0)
        pushq   %r14
-.Lpush_r14:
-#define FRAME_SIZE 80
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r14, 0)
+       pushq   %r15
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r15, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define FRAME_SIZE 32
+#else
+# define FRAME_SIZE 48
+#endif
        subq    $FRAME_SIZE, %rsp
-.Lsubq:
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+       cfi_remember_state
 
        cmpq    $1000000000, 8(%rdx)
        movl    $EINVAL, %eax
-       jae     18f
+       jae     48f
 
        /* Stack frame:
 
-          rsp + 80
-                   +--------------------------+
-          rsp + 48 | cleanup buffer           |
-                   +--------------------------+
-          rsp + 40 | old wake_seq value       |
-                   +--------------------------+
-          rsp + 24 | timeout value            |
-                   +--------------------------+
+          rsp + 48
+                   +--------------------------+
+          rsp + 32 | timeout value            |
+                   +--------------------------+
+          rsp + 24 | old wake_seq value       |
+                   +--------------------------+
           rsp + 16 | mutex pointer            |
-                   +--------------------------+
+                   +--------------------------+
           rsp +  8 | condvar pointer          |
-                   +--------------------------+
+                   +--------------------------+
           rsp +  4 | old broadcast_seq value  |
-                   +--------------------------+
+                   +--------------------------+
           rsp +  0 | old cancellation mode    |
-                   +--------------------------+
+                   +--------------------------+
        */
 
        cmpq    $-1, dep_mutex(%rdi)
@@ -88,8 +102,18 @@ __pthread_cond_timedwait:
        je      22f
        movq    %rsi, dep_mutex(%rdi)
 
+22:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+       cmpl    $0, __have_futex_clock_realtime(%rip)
+#  else
+       cmpl    $0, __have_futex_clock_realtime
+#  endif
+       je      .Lreltmo
+#endif
+
        /* Get internal lock.  */
-22:    movl    $1, %esi
+       movl    $1, %esi
        xorl    %eax, %eax
        LOCK
 #if cond_lock == 0
@@ -97,89 +121,29 @@ __pthread_cond_timedwait:
 #else
        cmpxchgl %esi, cond_lock(%rdi)
 #endif
-       jnz     1f
+       jnz     31f
 
        /* Unlock the mutex.  */
-2:     movq    16(%rsp), %rdi
+32:    movq    16(%rsp), %rdi
        xorl    %esi, %esi
        callq   __pthread_mutex_unlock_usercnt
 
        testl   %eax, %eax
-       jne     16f
+       jne     46f
 
        movq    8(%rsp), %rdi
        incq    total_seq(%rdi)
        incl    cond_futex(%rdi)
-       addl    $(1 << clock_bits), cond_nwaiters(%rdi)
-
-       /* Install cancellation handler.  */
-#ifdef __PIC__
-       leaq    __condvar_cleanup(%rip), %rsi
-#else
-       leaq    __condvar_cleanup, %rsi
-#endif
-       leaq    48(%rsp), %rdi
-       movq    %rsp, %rdx
-       callq   __pthread_cleanup_push
+       addl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
        /* Get and store current wakeup_seq value.  */
        movq    8(%rsp), %rdi
        movq    wakeup_seq(%rdi), %r9
        movl    broadcast_seq(%rdi), %edx
-       movq    %r9, 40(%rsp)
+       movq    %r9, 24(%rsp)
        movl    %edx, 4(%rsp)
 
-       /* Get the current time.  */
-8:
-#ifdef __NR_clock_gettime
-       /* Get the clock number.  Note that the field in the condvar
-          structure stores the number minus 1.  */
-       movq    8(%rsp), %rdi
-       movl    cond_nwaiters(%rdi), %edi
-       andl    $((1 << clock_bits) - 1), %edi
-       /* Only clocks 0 and 1 are allowed so far.  Both are handled in the
-          kernel.  */
-       leaq    24(%rsp), %rsi
-       movl    $__NR_clock_gettime, %eax
-       syscall
-# ifndef __ASSUME_POSIX_TIMERS
-       cmpq    $-ENOSYS, %rax
-       je      19f
-# endif
-
-       /* Compute relative timeout.  */
-       movq    (%r13), %rcx
-       movq    8(%r13), %rdx
-       subq    24(%rsp), %rcx
-       subq    32(%rsp), %rdx
-#else
-       leaq    24(%rsp), %rdi
-       xorl    %esi, %esi
-       movq    $VSYSCALL_ADDR_vgettimeofday, %rax
-       callq   *%rax
-
-       /* Compute relative timeout.  */
-       movq    32(%rsp), %rax
-       movl    $1000, %edx
-       mul     %rdx            /* Milli seconds to nano seconds.  */
-       movq    (%r13), %rcx
-       movq    8(%r13), %rdx
-       subq    24(%rsp), %rcx
-       subq    %rax, %rdx
-#endif
-       jns     12f
-       addq    $1000000000, %rdx
-       decq    %rcx
-12:    testq   %rcx, %rcx
-       movq    8(%rsp), %rdi
-       movq    $-ETIMEDOUT, %r14
-       js      6f
-
-       /* Store relative timeout.  */
-21:    movq    %rcx, 24(%rsp)
-       movq    %rdx, 32(%rsp)
-
-       movl    cond_futex(%rdi), %r12d
+38:    movl    cond_futex(%rdi), %r12d
 
        /* Unlock.  */
        LOCK
@@ -188,25 +152,67 @@ __pthread_cond_timedwait:
 #else
        decl    cond_lock(%rdi)
 #endif
-       jne     3f
+       jne     33f
 
-4:     callq   __pthread_enable_asynccancel
+.LcleanupSTART1:
+34:    callq   __pthread_enable_asynccancel
        movl    %eax, (%rsp)
 
-       leaq    24(%rsp), %r10
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
+       movq    %r13, %r10
+       movl    $FUTEX_WAIT_BITSET, %esi
+       cmpq    $-1, dep_mutex(%rdi)
+       je      60f
+
+       movq    dep_mutex(%rdi), %r8
+       /* Requeue to a non-robust PI mutex if the PI bit is set and
+       the robust bit is not set.  */
+       movl    MUTEX_KIND(%r8), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       jne     61f
+
+       movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+       xorl    %eax, %eax
+       /* The following only works like this because we only support
+          two clocks, represented using a single bit.  */
+       testl   $1, cond_nwaiters(%rdi)
+       movl    $FUTEX_CLOCK_REALTIME, %edx
+       cmove   %edx, %eax
+       orl     %eax, %esi
+       movq    %r12, %rdx
+       addq    $cond_futex, %rdi
+       movl    $SYS_futex, %eax
+       syscall
+
+       movl    $1, %r15d
+#ifdef __ASSUME_REQUEUE_PI
+       jmp     62f
 #else
-       movl    $FUTEX_WAIT, %esi
+       cmpq    $-4095, %rax
+       jnae    62f
+
+       subq    $cond_futex, %rdi
 #endif
+
+61:    movl    $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
+60:    xorl    %r15d, %r15d
+       xorl    %eax, %eax
+       /* The following only works like this because we only support
+          two clocks, represented using a single bit.  */
+       testl   $1, cond_nwaiters(%rdi)
+       movl    $FUTEX_CLOCK_REALTIME, %edx
+       movl    $0xffffffff, %r9d
+       cmove   %edx, %eax
+       orl     %eax, %esi
        movq    %r12, %rdx
        addq    $cond_futex, %rdi
        movl    $SYS_futex, %eax
        syscall
-       movq    %rax, %r14
+62:    movq    %rax, %r14
 
        movl    (%rsp), %edi
        callq   __pthread_disable_asynccancel
+.LcleanupEND1:
 
        /* Lock.  */
        movq    8(%rsp), %rdi
@@ -218,120 +224,158 @@ __pthread_cond_timedwait:
 #else
        cmpxchgl %esi, cond_lock(%rdi)
 #endif
-       jne     5f
+       jne     35f
 
-6:     movl    broadcast_seq(%rdi), %edx
+36:    movl    broadcast_seq(%rdi), %edx
 
        movq    woken_seq(%rdi), %rax
 
        movq    wakeup_seq(%rdi), %r9
 
        cmpl    4(%rsp), %edx
-       jne     23f
+       jne     53f
 
-       cmpq    40(%rsp), %r9
-       jbe     15f
+       cmpq    24(%rsp), %r9
+       jbe     45f
 
        cmpq    %rax, %r9
-       ja      9f
+       ja      39f
 
-15:    cmpq    $-ETIMEDOUT, %r14
-       jne     8b
+45:    cmpq    $-ETIMEDOUT, %r14
+       jne     38b
 
-13:    incq    wakeup_seq(%rdi)
+99:    incq    wakeup_seq(%rdi)
        incl    cond_futex(%rdi)
        movl    $ETIMEDOUT, %r14d
-       jmp     14f
+       jmp     44f
 
-23:    xorq    %r14, %r14
-       jmp     24f
+53:    xorq    %r14, %r14
+       jmp     54f
 
-9:     xorq    %r14, %r14
-14:    incq    woken_seq(%rdi)
+39:    xorq    %r14, %r14
+44:    incq    woken_seq(%rdi)
 
-24:    subl    $(1 << clock_bits), cond_nwaiters(%rdi)
+54:    subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
        /* Wake up a thread which wants to destroy the condvar object.  */
        cmpq    $0xffffffffffffffff, total_seq(%rdi)
-       jne     25f
+       jne     55f
        movl    cond_nwaiters(%rdi), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
-       jne     25f
+       andl    $~((1 << nwaiters_shift) - 1), %eax
+       jne     55f
 
        addq    $cond_nwaiters, %rdi
-       movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %esi
+       cmpq    $-1, dep_mutex-cond_nwaiters(%rdi)
        movl    $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
+#else
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
+#endif
+       movl    $SYS_futex, %eax
        syscall
        subq    $cond_nwaiters, %rdi
 
-25:    LOCK
+55:    LOCK
 #if cond_lock == 0
        decl    (%rdi)
 #else
        decl    cond_lock(%rdi)
 #endif
-       jne     10f
+       jne     40f
 
-       /* Remove cancellation handler.  */
-11:    movq    48+CLEANUP_PREV(%rsp), %rdx
-       movq    %rdx, %fs:CLEANUP
+       /* If requeue_pi is used the kernel performs the locking of the
+          mutex. */
+41:    movq    16(%rsp), %rdi
+       testl   %r15d, %r15d
+       jnz     64f
 
-       movq    16(%rsp), %rdi
        callq   __pthread_mutex_cond_lock
 
-       testq   %rax, %rax
+63:    testq   %rax, %rax
        cmoveq  %r14, %rax
 
-18:    addq    $FRAME_SIZE, %rsp
-.Laddq:
+48:    addq    $FRAME_SIZE, %rsp
+       cfi_adjust_cfa_offset(-FRAME_SIZE)
+       popq    %r15
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r15)
        popq    %r14
-.Lpop_r14:
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r14)
        popq    %r13
-.Lpop_r13:
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r13)
        popq    %r12
-.Lpop_r12:
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r12)
 
        retq
 
+       cfi_restore_state
+
+64:    callq   __pthread_mutex_cond_lock_adjust
+       movq    %r14, %rax
+       jmp     48b
+
        /* Initial locking failed.  */
-1:
-.LSbl1:
+31:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
-       jmp     2b
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
+       jmp     32b
 
        /* Unlock in loop requires wakeup.  */
-3:
+33:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
-       jmp     4b
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
+       jmp     34b
 
        /* Locking in loop failed.  */
-5:
+35:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
 #if cond_lock != 0
        subq    $cond_lock, %rdi
 #endif
-       jmp     6b
+       jmp     36b
 
        /* Unlock after loop requires wakeup.  */
-10:
+40:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
-       jmp     11b
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
+       jmp     41b
 
        /* The initial unlocking of the mutex failed.  */
-16:    movq    8(%rsp), %rdi
+46:    movq    8(%rsp), %rdi
        movq    %rax, (%rsp)
        LOCK
 #if cond_lock == 0
@@ -339,30 +383,239 @@ __pthread_cond_timedwait:
 #else
        decl    cond_lock(%rdi)
 #endif
-       jne     17f
+       jne     47f
 
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
+
+47:    movq    (%rsp), %rax
+       jmp     48b
+
 
-17:    movq    (%rsp), %rax
-       jmp     18b
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+       xorl    %r15d, %r15d
 
-#if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
+       /* Get internal lock.  */
+       movl    $1, %esi
+       xorl    %eax, %eax
+       LOCK
+# if cond_lock == 0
+       cmpxchgl %esi, (%rdi)
+# else
+       cmpxchgl %esi, cond_lock(%rdi)
+# endif
+       jnz     1f
+
+       /* Unlock the mutex.  */
+2:     movq    16(%rsp), %rdi
+       xorl    %esi, %esi
+       callq   __pthread_mutex_unlock_usercnt
+
+       testl   %eax, %eax
+       jne     46b
+
+       movq    8(%rsp), %rdi
+       incq    total_seq(%rdi)
+       incl    cond_futex(%rdi)
+       addl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+       /* Get and store current wakeup_seq value.  */
+       movq    8(%rsp), %rdi
+       movq    wakeup_seq(%rdi), %r9
+       movl    broadcast_seq(%rdi), %edx
+       movq    %r9, 24(%rsp)
+       movl    %edx, 4(%rsp)
+
+       /* Get the current time.  */
+8:
+# ifdef __NR_clock_gettime
+       /* Get the clock number.  Note that the field in the condvar
+          structure stores the number minus 1.  */
+       movq    8(%rsp), %rdi
+       movl    cond_nwaiters(%rdi), %edi
+       andl    $((1 << nwaiters_shift) - 1), %edi
+       /* Only clocks 0 and 1 are allowed so far.  Both are handled in the
+          kernel.  */
+       leaq    32(%rsp), %rsi
+#  ifdef SHARED
+       movq    __vdso_clock_gettime@GOTPCREL(%rip), %rax
+       movq    (%rax), %rax
+       PTR_DEMANGLE (%rax)
+       jz      26f
+       call    *%rax
+       jmp     27f
+#  endif
+26:    movl    $__NR_clock_gettime, %eax
+       syscall
+27:
+#  ifndef __ASSUME_POSIX_TIMERS
+       cmpq    $-ENOSYS, %rax
+       je      19f
+#  endif
+
+       /* Compute relative timeout.  */
+       movq    (%r13), %rcx
+       movq    8(%r13), %rdx
+       subq    32(%rsp), %rcx
+       subq    40(%rsp), %rdx
+# else
+       leaq    24(%rsp), %rdi
+       xorl    %esi, %esi
+       movq    $VSYSCALL_ADDR_vgettimeofday, %rax
+       callq   *%rax
+
+       /* Compute relative timeout.  */
+       movq    40(%rsp), %rax
+       movl    $1000, %edx
+       mul     %rdx            /* Milli seconds to nano seconds.  */
+       movq    (%r13), %rcx
+       movq    8(%r13), %rdx
+       subq    32(%rsp), %rcx
+       subq    %rax, %rdx
+# endif
+       jns     12f
+       addq    $1000000000, %rdx
+       decq    %rcx
+12:    testq   %rcx, %rcx
+       movq    8(%rsp), %rdi
+       movq    $-ETIMEDOUT, %r14
+       js      6f
+
+       /* Store relative timeout.  */
+21:    movq    %rcx, 32(%rsp)
+       movq    %rdx, 40(%rsp)
+
+       movl    cond_futex(%rdi), %r12d
+
+       /* Unlock.  */
+       LOCK
+# if cond_lock == 0
+       decl    (%rdi)
+# else
+       decl    cond_lock(%rdi)
+# endif
+       jne     3f
+
+.LcleanupSTART2:
+4:     callq   __pthread_enable_asynccancel
+       movl    %eax, (%rsp)
+
+       leaq    32(%rsp), %r10
+       cmpq    $-1, dep_mutex(%rdi)
+       movq    %r12, %rdx
+# ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAIT, %eax
+       movl    $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
+# else
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+#  if FUTEX_WAIT != 0
+       orl     $FUTEX_WAIT, %esi
+#  endif
+# endif
+       addq    $cond_futex, %rdi
+       movl    $SYS_futex, %eax
+       syscall
+       movq    %rax, %r14
+
+       movl    (%rsp), %edi
+       callq   __pthread_disable_asynccancel
+.LcleanupEND2:
+
+       /* Lock.  */
+       movq    8(%rsp), %rdi
+       movl    $1, %esi
+       xorl    %eax, %eax
+       LOCK
+# if cond_lock == 0
+       cmpxchgl %esi, (%rdi)
+# else
+       cmpxchgl %esi, cond_lock(%rdi)
+# endif
+       jne     5f
+
+6:     movl    broadcast_seq(%rdi), %edx
+
+       movq    woken_seq(%rdi), %rax
+
+       movq    wakeup_seq(%rdi), %r9
+
+       cmpl    4(%rsp), %edx
+       jne     53b
+
+       cmpq    24(%rsp), %r9
+       jbe     15f
+
+       cmpq    %rax, %r9
+       ja      39b
+
+15:    cmpq    $-ETIMEDOUT, %r14
+       jne     8b
+
+       jmp     99b
+
+       /* Initial locking failed.  */
+1:
+# if cond_lock != 0
+       addq    $cond_lock, %rdi
+# endif
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
+       jmp     2b
+
+       /* Unlock in loop requires wakeup.  */
+3:
+# if cond_lock != 0
+       addq    $cond_lock, %rdi
+# endif
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
+       jmp     4b
+
+       /* Locking in loop failed.  */
+5:
+# if cond_lock != 0
+       addq    $cond_lock, %rdi
+# endif
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
+# if cond_lock != 0
+       subq    $cond_lock, %rdi
+# endif
+       jmp     6b
+
+# if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
        /* clock_gettime not available.  */
-19:    leaq    24(%rsp), %rdi
+19:    leaq    32(%rsp), %rdi
        xorl    %esi, %esi
        movq    $VSYSCALL_ADDR_vgettimeofday, %rax
        callq   *%rax
 
        /* Compute relative timeout.  */
-       movq    32(%rsp), %rax
+       movq    40(%rsp), %rax
        movl    $1000, %edx
        mul     %rdx            /* Milli seconds to nano seconds.  */
        movq    (%r13), %rcx
        movq    8(%r13), %rdx
-       subq    24(%rsp), %rcx
+       subq    32(%rsp), %rcx
        subq    %rax, %rdx
        jns     20f
        addq    $1000000000, %rdx
@@ -372,97 +625,187 @@ __pthread_cond_timedwait:
        movq    $-ETIMEDOUT, %r14
        js      6b
        jmp     21b
+# endif
 #endif
-.LENDCODE:
        .size   __pthread_cond_timedwait, .-__pthread_cond_timedwait
 weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
 
 
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
-       .long   .LENDCIE-.LSTARTCIE             # Length of the CIE.
-.LSTARTCIE:
-       .long   0                               # CIE ID.
-       .byte   1                               # Version number.
-#ifdef SHARED
-       .string "zR"                            # NUL-terminated augmentation
-                                               # string.
+       .align  16
+       .type   __condvar_cleanup2, @function
+__condvar_cleanup2:
+       /* Stack frame:
+
+          rsp + 72
+                   +--------------------------+
+          rsp + 64 | %r12                     |
+                   +--------------------------+
+          rsp + 56 | %r13                     |
+                   +--------------------------+
+          rsp + 48 | %r14                     |
+                   +--------------------------+
+          rsp + 24 | unused                   |
+                   +--------------------------+
+          rsp + 16 | mutex pointer            |
+                   +--------------------------+
+          rsp +  8 | condvar pointer          |
+                   +--------------------------+
+          rsp +  4 | old broadcast_seq value  |
+                   +--------------------------+
+          rsp +  0 | old cancellation mode    |
+                   +--------------------------+
+       */
+
+       movq    %rax, 24(%rsp)
+
+       /* Get internal lock.  */
+       movq    8(%rsp), %rdi
+       movl    $1, %esi
+       xorl    %eax, %eax
+       LOCK
+#if cond_lock == 0
+       cmpxchgl %esi, (%rdi)
 #else
-       .ascii  "\0"                            # NUL-terminated augmentation
-                                               # string.
+       cmpxchgl %esi, cond_lock(%rdi)
 #endif
-       .uleb128 1                              # Code alignment factor.
-       .sleb128 -8                             # Data alignment factor.
-       .byte   16                              # Return address register
-                                               # column.
-#ifdef SHARED
-       .uleb128 1                              # Augmentation value length.
-       .byte   0x1b                            # Encoding: DW_EH_PE_pcrel
-                                               # + DW_EH_PE_sdata4.
+       jz      1f
+
+#if cond_lock != 0
+       addq    $cond_lock, %rdi
 #endif
-       .byte 0x0c                              # DW_CFA_def_cfa
-       .uleb128 7
-       .uleb128 8
-       .byte   0x90                            # DW_CFA_offset, column 0x8
-       .uleb128 1
-       .align 8
-.LENDCIE:
-
-       .long   .LENDFDE-.LSTARTFDE             # Length of the FDE.
-.LSTARTFDE:
-       .long   .LSTARTFDE-.LSTARTFRAME         # CIE pointer.
-#ifdef SHARED
-       .long   .LSTARTCODE-.                   # PC-relative start address
-                                               # of the code
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
+#if cond_lock != 0
+       subq    $cond_lock, %rdi
+#endif
+
+1:     movl    broadcast_seq(%rdi), %edx
+       cmpl    4(%rsp), %edx
+       jne     3f
+
+       /* We increment the wakeup_seq counter only if it is lower than
+          total_seq.  If this is not the case the thread was woken and
+          then canceled.  In this case we ignore the signal.  */
+       movq    total_seq(%rdi), %rax
+       cmpq    wakeup_seq(%rdi), %rax
+       jbe     6f
+       incq    wakeup_seq(%rdi)
+       incl    cond_futex(%rdi)
+6:     incq    woken_seq(%rdi)
+
+3:     subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+       /* Wake up a thread which wants to destroy the condvar object.  */
+       xorq    %r12, %r12
+       cmpq    $0xffffffffffffffff, total_seq(%rdi)
+       jne     4f
+       movl    cond_nwaiters(%rdi), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
+       jne     4f
+
+       cmpq    $-1, dep_mutex(%rdi)
+       leaq    cond_nwaiters(%rdi), %rdi
+       movl    $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
 #else
-       .long   .LSTARTCODE                     # Start address of the code.
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
 #endif
-       .long   .LENDCODE-.LSTARTCODE           # Length of the code.
-#ifdef SHARED
-       .uleb128 0                              # No augmentation data.
+       movl    $SYS_futex, %eax
+       syscall
+       subq    $cond_nwaiters, %rdi
+       movl    $1, %r12d
+
+4:     LOCK
+#if cond_lock == 0
+       decl    (%rdi)
+#else
+       decl    cond_lock(%rdi)
+#endif
+       je      2f
+#if cond_lock != 0
+       addq    $cond_lock, %rdi
+#endif
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
+
+       /* Wake up all waiters to make sure no signal gets lost.  */
+2:     testq   %r12, %r12
+       jnz     5f
+       addq    $cond_futex, %rdi
+       cmpq    $-1, dep_mutex-cond_futex(%rdi)
+       movl    $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
+#else
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
+#endif
+       movl    $SYS_futex, %eax
+       syscall
+
+5:     movq    16(%rsp), %rdi
+       callq   __pthread_mutex_cond_lock
+
+       movq    24(%rsp), %rdi
+       movq    FRAME_SIZE(%rsp), %r15
+       movq    FRAME_SIZE+8(%rsp), %r14
+       movq    FRAME_SIZE+16(%rsp), %r13
+       movq    FRAME_SIZE+24(%rsp), %r12
+.LcallUR:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE:
+       cfi_endproc
+       .size   __condvar_cleanup2, .-__condvar_cleanup2
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   DW_EH_PE_omit                   # @LPStart format
+       .byte   DW_EH_PE_omit                   # @TType format
+       .byte   DW_EH_PE_uleb128                # call-site format
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART1-.LSTARTCODE
+       .uleb128 .LcleanupEND1-.LcleanupSTART1
+       .uleb128 __condvar_cleanup2-.LSTARTCODE
+       .uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       .uleb128 .LcleanupSTART2-.LSTARTCODE
+       .uleb128 .LcleanupEND2-.LcleanupSTART2
+       .uleb128 __condvar_cleanup2-.LSTARTCODE
+       .uleb128  0
 #endif
-       .byte   0x40+.Lpush_r12-.LSTARTCODE     # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-       .byte   0x8c                            # DW_CFA_offset %r12
-       .uleb128 2
-       .byte   0x40+.Lpush_r13-.Lpush_r12      # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 24
-       .byte   0x8d                            # DW_CFA_offset %r13
-       .uleb128 3
-       .byte   0x40+.Lpush_r14-.Lpush_r13      # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 32
-       .byte   0x84                            # DW_CFA_offset %r14
-       .uleb128 4
-       .byte   0x40+.Lsubq-.Lpush_r14          # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 32+FRAME_SIZE
-       .byte   3                               # DW_CFA_advance_loc2
-       .2byte  .Laddq-.Lsubq
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 32
-       .byte   0x40+.Lpop_r14-.Laddq           # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 24
-       .byte   0xce                            # DW_CFA_restore %r14
-       .byte   0x40+.Lpop_r13-.Lpop_r14        # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-       .byte   0xcd                            # DW_CFA_restore %r13
-       .byte   0x40+.Lpop_r12-.Lpop_r13        # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 8
-       .byte   0xcc                            # DW_CFA_restore %r12
-       .byte   0x40+.LSbl1-.Lpop_r12           # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 32+FRAME_SIZE
-       .byte   0x8c                            # DW_CFA_offset %r12
-       .uleb128 2
-       .byte   0x8d                            # DW_CFA_offset %r13
-       .uleb128 3
-       .byte   0x84                            # DW_CFA_offset %r14
-       .uleb128 4
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
        .align  8
-.LENDFDE:
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+       .quad   __gcc_personality_v0
+#endif
index 544118e..7c488f2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <tcb-offsets.h>
+#include <pthread-pi-defines.h>
 
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#include <bits/kernel-features.h>
 
 
        .text
 
-       .align  16
-       .type   __condvar_cleanup, @function
-       .globl  __condvar_cleanup
-       .hidden __condvar_cleanup
-__condvar_cleanup:
-       pushq   %r12
-
-       /* Get internal lock.  */
-       movq    %rdi, %r8
-       movq    8(%rdi), %rdi
-       movl    $1, %esi
-       xorl    %eax, %eax
-       LOCK
-#if cond_lock == 0
-       cmpxchgl %esi, (%rdi)
-#else
-       cmpxchgl %esi, cond_lock(%rdi)
-#endif
-       jz      1f
-
-#if cond_lock != 0
-       addq    $cond_lock, %rdi
-#endif
-       callq   __lll_mutex_lock_wait
-#if cond_lock != 0
-       subq    $cond_lock, %rdi
-#endif
-
-1:     movl    broadcast_seq(%rdi), %edx
-       cmpl    4(%r8), %edx
-       jne     3f
-
-       incq    wakeup_seq(%rdi)
-       incq    woken_seq(%rdi)
-       incl    cond_futex(%rdi)
-
-3:     subl    $(1 << clock_bits), cond_nwaiters(%rdi)
-
-       /* Wake up a thread which wants to destroy the condvar object.  */
-       xorq    %r12, %r12
-       cmpq    $0xffffffffffffffff, total_seq(%rdi)
-       jne     4f
-       movl    cond_nwaiters(%rdi), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
-       jne     4f
-
-       addq    $cond_nwaiters, %rdi
-       movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %esi
-       movl    $1, %edx
-       syscall
-       subq    $cond_nwaiters, %rdi
-       movl    $1, %r12d
-
-4:     LOCK
-#if cond_lock == 0
-       decl    (%rdi)
-#else
-       decl    cond_lock(%rdi)
-#endif
-       je      2f
-#if cond_lock != 0
-       addq    $cond_lock, %rdi
-#endif
-       callq   __lll_mutex_unlock_wake
-
-       /* Wake up all waiters to make sure no signal gets lost.  */
-2:     testq   %r12, %r12
-       jnz     5f
-       addq    $cond_futex, %rdi
-       movl    $FUTEX_WAKE, %esi
-       movl    $0x7fffffff, %edx
-       movl    $SYS_futex, %eax
-       syscall
-
-5:     movq    16(%r8), %rdi
-       callq   __pthread_mutex_cond_lock
-
-       popq    %r12
-
-       retq
-       .size   __condvar_cleanup, .-__condvar_cleanup
-
-
 /* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
        .globl  __pthread_cond_wait
        .type   __pthread_cond_wait, @function
        .align  16
 __pthread_cond_wait:
 .LSTARTCODE:
-       pushq   %r12
-.Lpush_r12:
-#define FRAME_SIZE 64
-       subq    $FRAME_SIZE, %rsp
-.Lsubq:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+#define FRAME_SIZE 32
+       leaq    -FRAME_SIZE(%rsp), %rsp
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+
        /* Stack frame:
 
-          rsp + 64
-                   +--------------------------+
-          rsp + 32 | cleanup buffer           |
+          rsp + 32
                    +--------------------------+
           rsp + 24 | old wake_seq value       |
                    +--------------------------+
@@ -177,17 +95,7 @@ __pthread_cond_wait:
        movq    8(%rsp), %rdi
        incq    total_seq(%rdi)
        incl    cond_futex(%rdi)
-       addl    $(1 << clock_bits), cond_nwaiters(%rdi)
-
-       /* Install cancellation handler.  */
-#ifdef __PIC__
-       leaq    __condvar_cleanup(%rip), %rsi
-#else
-       leaq    __condvar_cleanup, %rsi
-#endif
-       leaq    32(%rsp), %rdi
-       movq    %rsp, %rdx
-       callq   __pthread_cleanup_push
+       addl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
        /* Get and store current wakeup_seq value.  */
        movq    8(%rsp), %rdi
@@ -197,7 +105,7 @@ __pthread_cond_wait:
        movl    %edx, 4(%rsp)
 
        /* Unlock.  */
-8:     movl    cond_futex(%rdi), %r12d
+8:     movl    cond_futex(%rdi), %edx
        LOCK
 #if cond_lock == 0
        decl    (%rdi)
@@ -206,23 +114,53 @@ __pthread_cond_wait:
 #endif
        jne     3f
 
+.LcleanupSTART:
 4:     callq   __pthread_enable_asynccancel
        movl    %eax, (%rsp)
 
-       movq    8(%rsp), %rdi
        xorq    %r10, %r10
-       movq    %r12, %rdx
-       addq    $cond_futex-cond_lock, %rdi
+       cmpq    $-1, dep_mutex(%rdi)
+       leaq    cond_futex(%rdi), %rdi
+       movl    $FUTEX_WAIT, %esi
+       je      60f
+
+       movq    dep_mutex-cond_futex(%rdi), %r8
+       /* Requeue to a non-robust PI mutex if the PI bit is set and
+       the robust bit is not set.  */
+       movl    MUTEX_KIND(%r8), %eax
+       andl    $(ROBUST_BIT|PI_BIT), %eax
+       cmpl    $PI_BIT, %eax
+       jne     61f
+
+       movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
        movl    $SYS_futex, %eax
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
+       syscall
+
+       movl    $1, %r8d
+#ifdef __ASSUME_REQUEUE_PI
+       jmp     62f
 #else
+       cmpq    $-4095, %rax
+       jnae    62f
+
+# ifndef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAIT, %esi
+# endif
 #endif
+
+61:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+#else
+       orl     %fs:PRIVATE_FUTEX, %esi
+#endif
+60:    xorl    %r8d, %r8d
+       movl    $SYS_futex, %eax
        syscall
 
-       movl    (%rsp), %edi
+62:    movl    (%rsp), %edi
        callq   __pthread_disable_asynccancel
+.LcleanupEND:
 
        /* Lock.  */
        movq    8(%rsp), %rdi
@@ -254,19 +192,29 @@ __pthread_cond_wait:
        incq    woken_seq(%rdi)
 
        /* Unlock */
-16:    subl    $(1 << clock_bits), cond_nwaiters(%rdi)
+16:    subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
        /* Wake up a thread which wants to destroy the condvar object.  */
        cmpq    $0xffffffffffffffff, total_seq(%rdi)
        jne     17f
        movl    cond_nwaiters(%rdi), %eax
-       andl    $~((1 << clock_bits) - 1), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     17f
 
        addq    $cond_nwaiters, %rdi
-       movl    $SYS_futex, %eax
-       movl    $FUTEX_WAKE, %esi
+       cmpq    $-1, dep_mutex-cond_nwaiters(%rdi)
        movl    $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
+#else
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
+#endif
+       movl    $SYS_futex, %eax
        syscall
        subq    $cond_nwaiters, %rdi
 
@@ -278,28 +226,36 @@ __pthread_cond_wait:
 #endif
        jne     10f
 
-       /* Remove cancellation handler.  */
-11:    movq    32+CLEANUP_PREV(%rsp), %rdx
-       movq    %rdx, %fs:CLEANUP
+       /* If requeue_pi is used the kernel performs the locking of the
+          mutex. */
+11:    movq    16(%rsp), %rdi
+       testl   %r8d, %r8d
+       jnz     18f
 
-       movq    16(%rsp), %rdi
        callq   __pthread_mutex_cond_lock
-14:    addq    $FRAME_SIZE, %rsp
-.Laddq:
 
-       popq    %r12
-.Lpop_r12:
+14:    leaq    FRAME_SIZE(%rsp), %rsp
+       cfi_adjust_cfa_offset(-FRAME_SIZE)
 
        /* We return the result of the mutex_lock operation.  */
        retq
 
+       cfi_adjust_cfa_offset(FRAME_SIZE)
+
+18:    callq   __pthread_mutex_cond_lock_adjust
+       xorl    %eax, %eax
+       jmp     14b
+
        /* Initial locking failed.  */
 1:
-.LSbl1:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
        jmp     2b
 
        /* Unlock in loop requires wakeup.  */
@@ -307,7 +263,15 @@ __pthread_cond_wait:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       /* The call preserves %rdx.  */
+       callq   __lll_unlock_wake
+#if cond_lock != 0
+       subq    $cond_lock, %rdi
+#endif
        jmp     4b
 
        /* Locking in loop failed.  */
@@ -315,7 +279,11 @@ __pthread_cond_wait:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
 #if cond_lock != 0
        subq    $cond_lock, %rdi
 #endif
@@ -326,7 +294,11 @@ __pthread_cond_wait:
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
        jmp     11b
 
        /* The initial unlocking of the mutex failed.  */
@@ -338,83 +310,185 @@ __pthread_cond_wait:
 #else
        decl    cond_lock(%rdi)
 #endif
-       jne     13f
+       je      13f
 
 #if cond_lock != 0
        addq    $cond_lock, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_unlock_wake
 
 13:    movq    %r10, %rax
        jmp     14b
-.LENDCODE:
        .size   __pthread_cond_wait, .-__pthread_cond_wait
 weak_alias(__pthread_cond_wait, pthread_cond_wait)
 
 
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
-       .long   .LENDCIE-.LSTARTCIE             # Length of the CIE.
-.LSTARTCIE:
-       .long   0                               # CIE ID.
-       .byte   1                               # Version number.
-#ifdef SHARED
-       .string "zR"                            # NUL-terminated augmentation
-                                               # string.
+       .align  16
+       .type   __condvar_cleanup1, @function
+       .globl  __condvar_cleanup1
+       .hidden __condvar_cleanup1
+__condvar_cleanup1:
+       /* Stack frame:
+
+          rsp + 32
+                   +--------------------------+
+          rsp + 24 | unused                   |
+                   +--------------------------+
+          rsp + 16 | mutex pointer            |
+                   +--------------------------+
+          rsp +  8 | condvar pointer          |
+                   +--------------------------+
+          rsp +  4 | old broadcast_seq value  |
+                   +--------------------------+
+          rsp +  0 | old cancellation mode    |
+                   +--------------------------+
+       */
+
+       movq    %rax, 24(%rsp)
+
+       /* Get internal lock.  */
+       movq    8(%rsp), %rdi
+       movl    $1, %esi
+       xorl    %eax, %eax
+       LOCK
+#if cond_lock == 0
+       cmpxchgl %esi, (%rdi)
 #else
-       .ascii  "\0"                            # NUL-terminated augmentation
-                                               # string.
+       cmpxchgl %esi, cond_lock(%rdi)
 #endif
-       .uleb128 1                              # Code alignment factor.
-       .sleb128 -8                             # Data alignment factor.
-       .byte   16                              # Return address register
-                                               # column.
-#ifdef SHARED
-       .uleb128 1                              # Augmentation value length.
-       .byte   0x1b                            # Encoding: DW_EH_PE_pcrel
-                                               # + DW_EH_PE_sdata4.
+       jz      1f
+
+#if cond_lock != 0
+       addq    $cond_lock, %rdi
 #endif
-       .byte 0x0c                              # DW_CFA_def_cfa
-       .uleb128 7
-       .uleb128 8
-       .byte   0x90                            # DW_CFA_offset, column 0x8
-       .uleb128 1
-       .align 8
-.LENDCIE:
-
-       .long   .LENDFDE-.LSTARTFDE             # Length of the FDE.
-.LSTARTFDE:
-       .long   .LSTARTFDE-.LSTARTFRAME         # CIE pointer.
-#ifdef SHARED
-       .long   .LSTARTCODE-.                   # PC-relative start address
-                                               # of the code
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       callq   __lll_lock_wait
+#if cond_lock != 0
+       subq    $cond_lock, %rdi
+#endif
+
+1:     movl    broadcast_seq(%rdi), %edx
+       cmpl    4(%rsp), %edx
+       jne     3f
+
+       /* We increment the wakeup_seq counter only if it is lower than
+          total_seq.  If this is not the case the thread was woken and
+          then canceled.  In this case we ignore the signal.  */
+       movq    total_seq(%rdi), %rax
+       cmpq    wakeup_seq(%rdi), %rax
+       jbe     6f
+       incq    wakeup_seq(%rdi)
+       incl    cond_futex(%rdi)
+6:     incq    woken_seq(%rdi)
+
+3:     subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+       /* Wake up a thread which wants to destroy the condvar object.  */
+       xorl    %ecx, %ecx
+       cmpq    $0xffffffffffffffff, total_seq(%rdi)
+       jne     4f
+       movl    cond_nwaiters(%rdi), %eax
+       andl    $~((1 << nwaiters_shift) - 1), %eax
+       jne     4f
+
+       cmpq    $-1, dep_mutex(%rdi)
+       leaq    cond_nwaiters(%rdi), %rdi
+       movl    $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
 #else
-       .long   .LSTARTCODE                     # Start address of the code.
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
 #endif
-       .long   .LENDCODE-.LSTARTCODE           # Length of the code.
-#ifdef SHARED
-       .uleb128 0                              # No augmentation data.
+       movl    $SYS_futex, %eax
+       syscall
+       subq    $cond_nwaiters, %rdi
+       movl    $1, %ecx
+
+4:     LOCK
+#if cond_lock == 0
+       decl    (%rdi)
+#else
+       decl    cond_lock(%rdi)
+#endif
+       je      2f
+#if cond_lock != 0
+       addq    $cond_lock, %rdi
+#endif
+       cmpq    $-1, dep_mutex-cond_lock(%rdi)
+       movl    $LLL_PRIVATE, %eax
+       movl    $LLL_SHARED, %esi
+       cmovne  %eax, %esi
+       /* The call preserves %rcx.  */
+       callq   __lll_unlock_wake
+
+       /* Wake up all waiters to make sure no signal gets lost.  */
+2:     testl   %ecx, %ecx
+       jnz     5f
+       addq    $cond_futex, %rdi
+       cmpq    $-1, dep_mutex-cond_futex(%rdi)
+       movl    $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE, %eax
+       movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+       cmove   %eax, %esi
+#else
+       movl    $0, %eax
+       movl    %fs:PRIVATE_FUTEX, %esi
+       cmove   %eax, %esi
+       orl     $FUTEX_WAKE, %esi
 #endif
-       .byte   0x40+.Lpush_r12-.LSTARTCODE     # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-       .byte   0x8c                            # DW_CFA_offset %r12
-       .uleb128 2
-       .byte   0x40+.Lsubq-.Lpush_r12          # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16+FRAME_SIZE
-       .byte   3                               # DW_CFA_advance_loc2
-       .2byte  .Laddq-.Lsubq
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-       .byte   0x40+.Lpop_r12-.Laddq           # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 8
-       .byte   0xcc                            # DW_CFA_restore %r12
-       .byte   0x40+.LSbl1-.Lpop_r12           # DW_CFA_advance_loc+N
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 80
-       .byte   0x8c                            # DW_CFA_offset %r12
-       .uleb128 2
+       movl    $SYS_futex, %eax
+       syscall
+
+5:     movq    16(%rsp), %rdi
+       callq   __pthread_mutex_cond_lock
+
+       movq    24(%rsp), %rdi
+.LcallUR:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE:
+       cfi_endproc
+       .size   __condvar_cleanup1, .-__condvar_cleanup1
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   DW_EH_PE_omit                   # @LPStart format
+       .byte   DW_EH_PE_omit                   # @TType format
+       .byte   DW_EH_PE_uleb128                # call-site format
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 __condvar_cleanup1-.LSTARTCODE
+       .uleb128  0
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
        .align  8
-.LENDFDE:
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+       .quad   __gcc_personality_v0
+#endif
index d8bfa26..0ac952b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+#include <lowlevellock.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define FUTEX_WAIT  0
-#define FUTEX_WAKE     1
 
        .comm   __fork_generation, 4, 4
 
        .align  16
 __pthread_once:
 .LSTARTCODE:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
        testl   $2, (%rdi)
        jz      1f
        xorl    %eax, %eax
@@ -45,7 +49,7 @@ __pthread_once:
 
        /* Preserve the function pointer.  */
 1:     pushq   %rsi
-.Lpush_rsi:
+       cfi_adjust_cfa_offset(8)
        xorq    %r10, %r10
 
        /* Not yet initialized or initialization in progress.
@@ -76,10 +80,15 @@ __pthread_once:
        jnz     3f      /* Different for generation -> run initializer.  */
 
        /* Somebody else got here first.  Wait.  */
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %esi
 #else
+# if FUTEX_WAIT == 0
+       movl    %fs:PRIVATE_FUTEX, %esi
+# else
        movl    $FUTEX_WAIT, %esi
+       orl     %fs:PRIVATE_FUTEX, %esi
+# endif
 #endif
        movl    $SYS_futex, %eax
        syscall
@@ -87,31 +96,40 @@ __pthread_once:
 
        /* Preserve the pointer to the control variable.  */
 3:     pushq   %rdi
-.Lpush_rdi:
+       cfi_adjust_cfa_offset(8)
+       pushq   %rdi
+       cfi_adjust_cfa_offset(8)
 
 .LcleanupSTART:
-       callq   *8(%rsp)
+       callq   *16(%rsp)
 .LcleanupEND:
 
        /* Get the control variable address back.  */
        popq    %rdi
-.Lpop_rdi:
+       cfi_adjust_cfa_offset(-8)
 
        /* Sucessful run of the initializer.  Signal that we are done.  */
        LOCK
        incl    (%rdi)
 
+       addq    $8, %rsp
+       cfi_adjust_cfa_offset(-8)
+
        /* Wake up all other threads.  */
        movl    $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
        movl    $FUTEX_WAKE, %esi
+       orl     %fs:PRIVATE_FUTEX, %esi
+#endif
        movl    $SYS_futex, %eax
        syscall
 
 4:     addq    $8, %rsp
-.Ladd:
+       cfi_adjust_cfa_offset(-8)
        xorl    %eax, %eax
        retq
-
        .size   __pthread_once,.-__pthread_once
 
 
@@ -125,12 +143,18 @@ pthread_once = __pthread_once
        .type   clear_once_control,@function
        .align  16
 clear_once_control:
+       cfi_adjust_cfa_offset(3 * 8)
        movq    (%rsp), %rdi
        movq    %rax, %r8
        movl    $0, (%rdi)
 
        movl    $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
        movl    $FUTEX_WAKE, %esi
+       orl     %fs:PRIVATE_FUTEX, %esi
+#endif
        movl    $SYS_futex, %eax
        syscall
 
@@ -139,15 +163,15 @@ clear_once_control:
        call    _Unwind_Resume@PLT
        hlt
 .LENDCODE:
+       cfi_endproc
        .size   clear_once_control,.-clear_once_control
 
 
        .section .gcc_except_table,"a",@progbits
 .LexceptSTART:
-       .byte   0xff                            # @LPStart format (omit)
-       .byte   0xff                            # @TType format (omit)
-       .byte   0x01                            # call-site format
-                                               # DW_EH_PE_uleb128
+       .byte   DW_EH_PE_omit                   # @LPStart format
+       .byte   DW_EH_PE_omit                   # @TType format
+       .byte   DW_EH_PE_uleb128                # call-site format
        .uleb128 .Lcstend-.Lcstbegin
 .Lcstbegin:
        .uleb128 .LcleanupSTART-.LSTARTCODE
@@ -161,92 +185,6 @@ clear_once_control:
 .Lcstend:
 
 
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
-       .long   .LENDCIE-.LSTARTCIE             # Length of the CIE.
-.LSTARTCIE:
-       .long   0                               # CIE ID.
-       .byte   1                               # Version number.
-#ifdef SHARED
-       .string "zPLR"                          # NUL-terminated augmentation
-                                               # string.
-#else
-       .string "zPL"                           # NUL-terminated augmentation
-                                               # string.
-#endif
-       .uleb128 1                              # Code alignment factor.
-       .sleb128 -8                             # Data alignment factor.
-       .byte   16                              # Return address register
-                                               # column.
-#ifdef SHARED
-       .uleb128 7                              # Augmentation value length.
-       .byte   0x9b                            # Personality: DW_EH_PE_pcrel
-                                               # + DW_EH_PE_sdata4
-                                               # + DW_EH_PE_indirect
-       .long   DW.ref.__gcc_personality_v0-.
-       .byte   0x1b                            # LSDA Encoding: DW_EH_PE_pcrel
-                                               # + DW_EH_PE_sdata4.
-       .byte   0x1b                            # FDE Encoding: DW_EH_PE_pcrel
-                                               # + DW_EH_PE_sdata4.
-#else
-       .uleb128 10                             # Augmentation value length.
-       .byte   0x0                             # Personality: absolute
-       .quad   __gcc_personality_v0
-       .byte   0x0                             # LSDA Encoding: absolute
-#endif
-       .byte 0x0c                              # DW_CFA_def_cfa
-       .uleb128 7
-       .uleb128 8
-       .byte   0x90                            # DW_CFA_offset, column 0x10
-       .uleb128 1
-       .align 8
-.LENDCIE:
-
-       .long   .LENDFDE-.LSTARTFDE             # Length of the FDE.
-.LSTARTFDE:
-       .long   .LSTARTFDE-.LSTARTFRAME         # CIE pointer.
-#ifdef SHARED
-       .long   .LSTARTCODE-.                   # PC-relative start address
-                                               # of the code.
-       .long   .LENDCODE-.LSTARTCODE           # Length of the code.
-       .uleb128 4                              # Augmentation size
-       .long   .LexceptSTART-.
-#else
-       .quad   .LSTARTCODE                     # Start address of the code.
-       .quad   .LENDCODE-.LSTARTCODE           # Length of the code.
-       .uleb128 8                              # Augmentation size
-       .quad   .LexceptSTART
-#endif
-       .byte   4                               # DW_CFA_advance_loc4
-       .long   .Lpush_rsi-.LSTARTCODE
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-       .byte   4                               # DW_CFA_advance_loc4
-       .long   .Lpush_rdi-.Lpush_rsi
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 24
-       .byte   4                               # DW_CFA_advance_loc4
-       .long   .Lpop_rdi-.Lpush_rdi
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-       .byte   4                               # DW_CFA_advance_loc4
-       .long   .Ladd-.Lpop_rdi
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 8
-       .byte   4                               # DW_CFA_advance_loc4
-       .long   clear_once_control-.Ladd
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 24
-#if 0
-       .byte   4                               # DW_CFA_advance_loc4
-       .long   .Lpop_rdi2-clear_once_control
-       .byte   14                              # DW_CFA_def_cfa_offset
-       .uleb128 16
-#endif
-       .align  8
-.LENDFDE:
-
-
 #ifdef SHARED
        .hidden DW.ref.__gcc_personality_v0
        .weak   DW.ref.__gcc_personality_v0
index d754357..9b8408b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
@@ -39,6 +30,7 @@
        .type   __pthread_rwlock_rdlock,@function
        .align  16
 __pthread_rwlock_rdlock:
+       cfi_startproc
        xorq    %r10, %r10
 
        /* Get the lock.  */
@@ -73,12 +65,20 @@ __pthread_rwlock_rdlock:
 #endif
        jne     10f
 
-11:    addq    $READERS_WAKEUP, %rdi
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+       xorl    PSHARED(%rdi), %esi
 #else
+# if FUTEX_WAIT == 0
+       movl    PSHARED(%rdi), %esi
+# else
        movl    $FUTEX_WAIT, %esi
+       orl     PSHARED(%rdi), %esi
+# endif
+       xorl    %fs:PRIVATE_FUTEX, %esi
 #endif
+       addq    $READERS_WAKEUP, %rdi
        movl    $SYS_futex, %eax
        syscall
 
@@ -113,11 +113,11 @@ __pthread_rwlock_rdlock:
        movq    %rdx, %rax
        retq
 
-1:
+1:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
@@ -129,11 +129,11 @@ __pthread_rwlock_rdlock:
        movl    $EDEADLK, %edx
        jmp     9b
 
-6:
+6:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
@@ -149,25 +149,26 @@ __pthread_rwlock_rdlock:
        movl    $EAGAIN, %edx
        jmp     9b
 
-10:
+10:    movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
        jmp     11b
 
-12:
+12:    movl    PSHARED(%rdi), %esi
 #if MUTEX == 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
        jmp     13b
+       cfi_endproc
        .size   __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
 
        .globl  pthread_rwlock_rdlock
index f044842..bb12d49 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday    0xffffffffff600000
 
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
        .text
 
        .globl  pthread_rwlock_timedrdlock
        .type   pthread_rwlock_timedrdlock,@function
        .align  16
 pthread_rwlock_timedrdlock:
+       cfi_startproc
        pushq   %r12
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r12, 0)
        pushq   %r13
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG        %edx
+#else
        pushq   %r14
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r14, 0)
+
        subq    $16, %rsp
+       cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
 
        movq    %rdi, %r12
        movq    %rsi, %r13
@@ -77,7 +81,7 @@ pthread_rwlock_timedrdlock:
        incl    READERS_QUEUED(%r12)
        je      4f
 
-       movl    READERS_WAKEUP(%r12), %r14d
+       movl    READERS_WAKEUP(%r12), VALREG
 
        /* Unlock.  */
        LOCK
@@ -88,8 +92,33 @@ pthread_rwlock_timedrdlock:
 #endif
        jne     10f
 
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+       cmpl    $0, __have_futex_clock_realtime(%rip)
+#  else
+       cmpl    $0, __have_futex_clock_realtime
+#  endif
+       je      .Lreltmo
+#endif
+
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+       xorl    PSHARED(%r12), %esi
+       movq    %r13, %r10
+       movl    $0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       movl    %r14d, %edx
+#endif
+21:    leaq    READERS_WAKEUP(%r12), %rdi
+       movl    $SYS_futex, %eax
+       syscall
+       movq    %rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       .subsection 2
+.Lreltmo:
        /* Get current time.  */
-11:    movq    %rsp, %rdi
+       movq    %rsp, %rdi
        xorl    %esi, %esi
        movq    $VSYSCALL_ADDR_vgettimeofday, %rax
        callq   *%rax
@@ -112,20 +141,26 @@ pthread_rwlock_timedrdlock:
        movq    %rcx, (%rsp)    /* Store relative timeout.  */
        movq    %rdi, 8(%rsp)
 
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
-#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+       xorl    PSHARED(%r12), %esi
+# else
+#  if FUTEX_WAIT == 0
+       movl    PSHARED(%r12), %esi
+#  else
        movl    $FUTEX_WAIT, %esi
-#endif
+       orl     PSHARED(%r12), %esi
+#  endif
+       xorl    %fs:PRIVATE_FUTEX, %esi
+# endif
        movq    %rsp, %r10
        movl    %r14d, %edx
-       leaq    READERS_WAKEUP(%r12), %rdi
-       movl    $SYS_futex, %eax
-       syscall
-       movq    %rax, %rdx
-17:
 
-       /* Reget the lock.  */
+       jmp     21b
+       .previous
+#endif
+
+17:    /* Reget the lock.  */
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
@@ -157,17 +192,36 @@ pthread_rwlock_timedrdlock:
 
 7:     movq    %rdx, %rax
 
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
        addq    $16, %rsp
+       cfi_adjust_cfa_offset(-16)
        popq    %r14
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r14)
+#endif
        popq    %r13
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r13)
        popq    %r12
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r12)
        retq
 
-1:
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+       cfi_adjust_cfa_offset(16)
+       cfi_rel_offset(%r12, 8)
+       cfi_rel_offset(%r13, 0)
+#else
+       cfi_adjust_cfa_offset(40)
+       cfi_offset(%r12, -16)
+       cfi_offset(%r13, -24)
+       cfi_offset(%r14, -32)
+#endif
+1:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
        jmp     2b
 
 14:    cmpl    %fs:TID, %eax
@@ -175,13 +229,13 @@ pthread_rwlock_timedrdlock:
        movl    $EDEADLK, %edx
        jmp     9b
 
-6:
+6:     movl    PSHARED(%r12), %esi
 #if MUTEX == 0
        movq    %r12, %rdi
 #else
        leal    MUTEX(%r12), %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     7b
 
        /* Overflow.  */
@@ -194,22 +248,22 @@ pthread_rwlock_timedrdlock:
        movl    $EAGAIN, %edx
        jmp     9b
 
-10:
+10:    movl    PSHARED(%r12), %esi
 #if MUTEX == 0
        movq    %r12, %rdi
 #else
        leaq    MUTEX(%r12), %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     11b
 
-12:
+12:    movl    PSHARED(%r12), %esi
 #if MUTEX == 0
        movq    %r12, %rdi
 #else
        leaq    MUTEX(%r12), %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
        jmp     13b
 
 16:    movq    $-ETIMEDOUT, %rdx
@@ -217,4 +271,5 @@ pthread_rwlock_timedrdlock:
 
 19:    movl    $EINVAL, %edx
        jmp     9b
+       cfi_endproc
        .size   pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
index b479da7..401bbc5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
 
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday    0xffffffffff600000
 
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
        .text
 
        .globl  pthread_rwlock_timedwrlock
        .type   pthread_rwlock_timedwrlock,@function
        .align  16
 pthread_rwlock_timedwrlock:
+       cfi_startproc
        pushq   %r12
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r12, 0)
        pushq   %r13
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG        %edx
+#else
        pushq   %r14
+       cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r14, 0)
+
        subq    $16, %rsp
+       cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
 
        movq    %rdi, %r12
        movq    %rsi, %r13
@@ -74,7 +79,7 @@ pthread_rwlock_timedwrlock:
        incl    WRITERS_QUEUED(%r12)
        je      4f
 
-       movl    WRITERS_WAKEUP(%r12), %r14d
+       movl    WRITERS_WAKEUP(%r12), VALREG
 
        LOCK
 #if MUTEX == 0
@@ -84,8 +89,33 @@ pthread_rwlock_timedwrlock:
 #endif
        jne     10f
 
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+       cmpl    $0, __have_futex_clock_realtime(%rip)
+#  else
+       cmpl    $0, __have_futex_clock_realtime
+#  endif
+       je      .Lreltmo
+#endif
+
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+       xorl    PSHARED(%r12), %esi
+       movq    %r13, %r10
+       movl    $0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       movl    %r14d, %edx
+#endif
+21:    leaq    WRITERS_WAKEUP(%r12), %rdi
+       movl    $SYS_futex, %eax
+       syscall
+       movq    %rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       .subsection 2
+.Lreltmo:
        /* Get current time.  */
-11:    movq    %rsp, %rdi
+       movq    %rsp, %rdi
        xorl    %esi, %esi
        movq    $VSYSCALL_ADDR_vgettimeofday, %rax
        callq   *%rax
@@ -108,20 +138,26 @@ pthread_rwlock_timedwrlock:
        movq    %rcx, (%rsp)    /* Store relative timeout.  */
        movq    %rdi, 8(%rsp)
 
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
-#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+       xorl    PSHARED(%r12), %esi
+# else
+#  if FUTEX_WAIT == 0
+       movl    PSHARED(%r12), %esi
+#  else
        movl    $FUTEX_WAIT, %esi
-#endif
+       orl     PSHARED(%r12), %esi
+#  endif
+       xorl    %fs:PRIVATE_FUTEX, %esi
+# endif
        movq    %rsp, %r10
        movl    %r14d, %edx
-       leaq    WRITERS_WAKEUP(%r12), %rdi
-       movl    $SYS_futex, %eax
-       syscall
-       movq    %rax, %rdx
-17:
 
-       /* Reget the lock.  */
+       jmp     21b
+       .previous
+#endif
+
+17:    /* Reget the lock.  */
        movl    $1, %esi
        xorl    %eax, %eax
        LOCK
@@ -153,17 +189,36 @@ pthread_rwlock_timedwrlock:
 
 7:     movq    %rdx, %rax
 
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
        addq    $16, %rsp
+       cfi_adjust_cfa_offset(-16)
        popq    %r14
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r14)
+#endif
        popq    %r13
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r13)
        popq    %r12
+       cfi_adjust_cfa_offset(-8)
+       cfi_restore(%r12)
        retq
 
-1:
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+       cfi_adjust_cfa_offset(16)
+       cfi_rel_offset(%r12, 8)
+       cfi_rel_offset(%r13, 0)
+#else
+       cfi_adjust_cfa_offset(40)
+       cfi_offset(%r12, -16)
+       cfi_offset(%r13, -24)
+       cfi_offset(%r14, -32)
+#endif
+1:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
        jmp     2b
 
 14:    cmpl    %fs:TID, %eax
@@ -171,13 +226,13 @@ pthread_rwlock_timedwrlock:
 20:    movl    $EDEADLK, %edx
        jmp     9b
 
-6:
+6:     movl    PSHARED(%r12), %esi
 #if MUTEX == 0
        movq    %r12, %rdi
 #else
        leal    MUTEX(%r12), %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     7b
 
        /* Overflow.  */
@@ -185,22 +240,22 @@ pthread_rwlock_timedwrlock:
        movl    $EAGAIN, %edx
        jmp     9b
 
-10:
+10:    movl    PSHARED(%r12), %esi
 #if MUTEX == 0
        movq    %r12, %rdi
 #else
        leaq    MUTEX(%r12), %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     11b
 
-12:
+12:    movl    PSHARED(%r12), %esi
 #if MUTEX == 0
        movq    %r12, %rdi
 #else
        leaq    MUTEX(%r12), %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
        jmp     13b
 
 16:    movq    $-ETIMEDOUT, %rdx
@@ -208,4 +263,5 @@ pthread_rwlock_timedwrlock:
 
 19:    movl    $EINVAL, %edx
        jmp     9b
+       cfi_endproc
        .size   pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
index a0f7522..cfcc7a1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
@@ -37,6 +29,7 @@
        .type   __pthread_rwlock_unlock,@function
        .align  16
 __pthread_rwlock_unlock:
+       cfi_startproc
        /* Get the lock.  */
        movl    $1, %esi
        xorl    %eax, %eax
@@ -55,9 +48,8 @@ __pthread_rwlock_unlock:
 
 5:     movl    $0, WRITER(%rdi)
 
-       movl    $1, %esi
+       movl    $1, %edx
        leaq    WRITERS_WAKEUP(%rdi), %r10
-       movq    %rsi, %rdx
        cmpl    $0, WRITERS_QUEUED(%rdi)
        jne     0f
 
@@ -77,7 +69,16 @@ __pthread_rwlock_unlock:
 #endif
        jne     7f
 
-8:     movl    $SYS_futex, %eax
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %esi
+       xorl    PSHARED(%rdi), %esi
+#else
+       movl    $FUTEX_WAKE, %esi
+       orl     PSHARED(%rdi), %esi
+       xorl    %fs:PRIVATE_FUTEX, %esi
+#endif
+       movl    $SYS_futex, %eax
        movq    %r10, %rdi
        syscall
 
@@ -96,30 +97,30 @@ __pthread_rwlock_unlock:
 4:     xorl    %eax, %eax
        retq
 
-1:
+1:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
        jmp     2b
 
-3:
+3:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     4b
 
-7:
+7:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     8b
-
+       cfi_endproc
        .size   __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
 
        .globl  pthread_rwlock_unlock
index 39b54dc..b7bc852 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
 
 
        .text
@@ -39,6 +30,7 @@
        .type   __pthread_rwlock_wrlock,@function
        .align  16
 __pthread_rwlock_wrlock:
+       cfi_startproc
        xorq    %r10, %r10
 
        /* Get the lock.  */
@@ -71,12 +63,20 @@ __pthread_rwlock_wrlock:
 #endif
        jne     10f
 
-11:    addq    $WRITERS_WAKEUP, %rdi
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+       movl    $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+       xorl    PSHARED(%rdi), %esi
 #else
+# if FUTEX_WAIT == 0
+       movl    PSHARED(%rdi), %esi
+# else
        movl    $FUTEX_WAIT, %esi
+       orl     PSHARED(%rdi), %esi
+# endif
+       xorl    %fs:PRIVATE_FUTEX, %esi
 #endif
+       addq    $WRITERS_WAKEUP, %rdi
        movl    $SYS_futex, %eax
        syscall
 
@@ -111,11 +111,11 @@ __pthread_rwlock_wrlock:
        movq    %rdx, %rax
        retq
 
-1:
+1:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
@@ -126,36 +126,37 @@ __pthread_rwlock_wrlock:
        movl    $EDEADLK, %edx
        jmp     9b
 
-6:
+6:     movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
        jmp     7b
 
 4:     decl    WRITERS_QUEUED(%rdi)
        movl    $EAGAIN, %edx
        jmp     9b
 
-10:
+10:    movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_unlock_wake
+       callq   __lll_unlock_wake
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
        jmp     11b
 
-12:
+12:    movl    PSHARED(%rdi), %esi
 #if MUTEX != 0
        addq    $MUTEX, %rdi
 #endif
-       callq   __lll_mutex_lock_wait
+       callq   __lll_lock_wait
 #if MUTEX != 0
        subq    $MUTEX, %rdi
 #endif
        jmp     13b
+       cfi_endproc
        .size   __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
 
        .globl  pthread_rwlock_wrlock
index 5c8a858..7af6524 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <pthread-errnos.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE             1
+#include <structsem.h>
 
 
        .text
        .type   sem_post,@function
        .align  16
 sem_post:
-       movl    $1, %edx
+#if VALUE == 0
+       movl    (%rdi), %eax
+#else
+       movl    VALUE(%rdi), %eax
+#endif
+0:     cmpl    $SEM_VALUE_MAX, %eax
+       je      3f
+       leal    1(%rax), %esi
        LOCK
-       xaddl   %edx, (%rdi)
+#if VALUE == 0
+       cmpxchgl %esi, (%rdi)
+#else
+       cmpxchgl %esi, VALUE(%rdi)
+#endif
+       jnz     0b
+
+       cmpq    $0, NWAITERS(%rdi)
+       je      2f
 
        movl    $SYS_futex, %eax
        movl    $FUTEX_WAKE, %esi
-       incl    %edx
+       orl     PRIVATE(%rdi), %esi
+       movl    $1, %edx
        syscall
 
        testq   %rax, %rax
        js      1f
 
-       xorl    %eax, %eax
+2:     xorl    %eax, %eax
        retq
 
 1:
 #if USE___THREAD
-       movq    errno@gottpoff(%rip), %rdx
-       movl    $EINVAL, %fs:(%rdx)
+       movl    $EINVAL, %eax
+#else
+       callq   __errno_location@plt
+       movl    $EINVAL, %edx
+#endif
+       jmp     4f
+
+3:
+#if USE___THREAD
+       movl    $EOVERFLOW, %eax
 #else
        callq   __errno_location@plt
-       movl    $EINVAL, (%rax)
+       movl    $EOVERFLOW, %edx
 #endif
 
+4:
+#if USE___THREAD
+       movq    errno@gottpoff(%rip), %rdx
+       movl    %eax, %fs:(%rdx)
+#else
+       movl    %edx, (%rax)
+#endif
        orl     $-1, %eax
        retq
        .size   sem_post,.-sem_post
index 64e1680..f9af8ec 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
+#include <structsem.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
 
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday    0xffffffffff600000
 
-
        .text
 
        .globl  sem_timedwait
        .type   sem_timedwait,@function
        .align  16
-       cfi_startproc
 sem_timedwait:
-       /* First check for cancellation.  */
-       movl    %fs:CANCELHANDLING, %eax
-       andl    $0xfffffff9, %eax
-       cmpl    $8, %eax
-       je      11f
-
+.LSTARTCODE:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+#if VALUE == 0
        movl    (%rdi), %eax
+#else
+       movl    VALUE(%rdi), %eax
+#endif
 2:     testl   %eax, %eax
        je      1f
 
        leaq    -1(%rax), %rdx
        LOCK
+#if VALUE == 0
        cmpxchgl %edx, (%rdi)
+#else
+       cmpxchgl %edx, VALUE(%rdi)
+#endif
        jne     2b
 
        xorl    %eax, %eax
        retq
 
        /* Check whether the timeout value is valid.  */
-1:     pushq   %r12
+1:     cmpq    $1000000000, 8(%rsi)
+       jae     6f
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+       cmpl    $0, __have_futex_clock_realtime(%rip)
+#  else
+       cmpl    $0, __have_futex_clock_realtime
+#  endif
+       je      .Lreltmo
+#endif
+
+       /* This push is only needed to store the sem_t pointer for the
+          exception handler.  */
+       pushq   %rdi
+       cfi_adjust_cfa_offset(8)
+
+       movq    %rsi, %r10
+
+       LOCK
+       addq    $1, NWAITERS(%rdi)
+
+.LcleanupSTART:
+13:    call    __pthread_enable_asynccancel
+       movl    %eax, %r8d
+
+#if VALUE != 0
+       leaq    VALUE(%rdi), %rdi
+#endif
+       movl    $0xffffffff, %r9d
+       movl    $FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+       orl     PRIVATE(%rdi), %esi
+       movl    $SYS_futex, %eax
+       xorl    %edx, %edx
+       syscall
+       movq    %rax, %r9
+#if VALUE != 0
+       leaq    -VALUE(%rdi), %rdi
+#endif
+
+       xchgq   %r8, %rdi
+       call    __pthread_disable_asynccancel
+.LcleanupEND:
+       movq    %r8, %rdi
+
+       testq   %r9, %r9
+       je      11f
+       cmpq    $-EWOULDBLOCK, %r9
+       jne     3f
+
+11:
+#if VALUE == 0
+       movl    (%rdi), %eax
+#else
+       movl    VALUE(%rdi), %eax
+#endif
+14:    testl   %eax, %eax
+       je      13b
+
+       leaq    -1(%rax), %rcx
+       LOCK
+#if VALUE == 0
+       cmpxchgl %ecx, (%rdi)
+#else
+       cmpxchgl %ecx, VALUE(%rdi)
+#endif
+       jne     14b
+
+       xorl    %eax, %eax
+
+15:    LOCK
+       subq    $1, NWAITERS(%rdi)
+
+       leaq    8(%rsp), %rsp
+       cfi_adjust_cfa_offset(-8)
+       retq
+
+       cfi_adjust_cfa_offset(8)
+3:     negq    %r9
+#if USE___THREAD
+       movq    errno@gottpoff(%rip), %rdx
+       movl    %r9d, %fs:(%rdx)
+#else
+       callq   __errno_location@plt
+       movl    %r9d, (%rax)
+#endif
+
+       orl     $-1, %eax
+       jmp     15b
+
+       cfi_adjust_cfa_offset(-8)
+6:
+#if USE___THREAD
+       movq    errno@gottpoff(%rip), %rdx
+       movl    $EINVAL, %fs:(%rdx)
+#else
+       callq   __errno_location@plt
+       movl    $EINVAL, (%rax)
+#endif
+
+       orl     $-1, %eax
+
+       retq
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+       pushq   %r12
        cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r12, 0)
        pushq   %r13
        cfi_adjust_cfa_offset(8)
+       cfi_rel_offset(%r13, 0)
        pushq   %r14
        cfi_adjust_cfa_offset(8)
-       subq    $24, %rsp
-       cfi_adjust_cfa_offset(24)
+       cfi_rel_offset(%r14, 0)
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define STACKFRAME 8
+#else
+# define STACKFRAME 24
+#endif
+       subq    $STACKFRAME, %rsp
+       cfi_adjust_cfa_offset(STACKFRAME)
 
        movq    %rdi, %r12
-       cfi_offset(12, -16)             /* %r12 */
        movq    %rsi, %r13
-       cfi_offset(13, -24)             /* %r13 */
 
-       /* Check for invalid nanosecond field.  */
-       cmpq    $1000000000, 8(%r13)
-       movl    $EINVAL, %r14d
-       cfi_offset(14, -24)             /* %r14 */
-       jae     6f
-
-7:     call    __pthread_enable_asynccancel
-       movl    %eax, 16(%rsp)
+       LOCK
+       addq    $1, NWAITERS(%r12)
 
-       xorl    %esi, %esi
+7:     xorl    %esi, %esi
        movq    %rsp, %rdi
        movq    $VSYSCALL_ADDR_vgettimeofday, %rax
        callq   *%rax
@@ -99,14 +212,27 @@ sem_timedwait:
        decq    %rdi
 5:     testq   %rdi, %rdi
        movl    $ETIMEDOUT, %r14d
-       js      6f              /* Time is already up.  */
+       js      36f             /* Time is already up.  */
 
        movq    %rdi, (%rsp)    /* Store relative timeout.  */
        movq    %rsi, 8(%rsp)
 
+.LcleanupSTART2:
+       call    __pthread_enable_asynccancel
+       movl    %eax, 16(%rsp)
+
        movq    %rsp, %r10
+# if VALUE == 0
        movq    %r12, %rdi
-       xorl    %esi, %esi
+# else
+       leaq    VALUE(%r12), %rdi
+# endif
+# if FUTEX_WAIT == 0
+       movl    PRIVATE(%rdi), %esi
+# else
+       movl    $FUTEX_WAIT, %esi
+       orl     PRIVATE(%rdi), %esi
+# endif
        movl    $SYS_futex, %eax
        xorl    %edx, %edx
        syscall
@@ -114,41 +240,55 @@ sem_timedwait:
 
        movl    16(%rsp), %edi
        call    __pthread_disable_asynccancel
+.LcleanupEND2:
 
        testq   %r14, %r14
        je      9f
        cmpq    $-EWOULDBLOCK, %r14
-       jne     3f
+       jne     33f
 
-9:     movl    (%r12), %eax
+9:
+# if VALUE == 0
+       movl    (%r12), %eax
+# else
+       movl    VALUE(%r12), %eax
+# endif
 8:     testl   %eax, %eax
        je      7b
 
        leaq    -1(%rax), %rcx
        LOCK
+# if VALUE == 0
        cmpxchgl %ecx, (%r12)
+# else
+       cmpxchgl %ecx, VALUE(%r12)
+# endif
        jne     8b
 
        xorl    %eax, %eax
-10:    addq    $24, %rsp
-       cfi_adjust_cfa_offset(-24)
+
+45:    LOCK
+       subq    $1, NWAITERS(%r12)
+
+       addq    $STACKFRAME, %rsp
+       cfi_adjust_cfa_offset(-STACKFRAME)
        popq    %r14
        cfi_adjust_cfa_offset(-8)
-       cfi_restore(14)
+       cfi_restore(%r14)
        popq    %r13
        cfi_adjust_cfa_offset(-8)
-       cfi_restore(13)
+       cfi_restore(%r13)
        popq    %r12
        cfi_adjust_cfa_offset(-8)
-       cfi_restore(12)
+       cfi_restore(%r12)
        retq
 
-       cfi_adjust_cfa_offset(48)
-       cfi_offset(12, -16)             /* %r12 */
-       cfi_offset(13, -24)             /* %r13 */
-       cfi_offset(14, -32)             /* %r14 */
-3    negq    %r14
-6:
+       cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+       cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+       cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+       cfi_rel_offset(%r14, STACKFRAME)
+33:    negq    %r14
+36:
 #if USE___THREAD
        movq    errno@gottpoff(%rip), %rdx
        movl    %r14d, %fs:(%rdx)
@@ -158,17 +298,90 @@ sem_timedwait:
 #endif
 
        orl     $-1, %eax
-       jmp     10b
-       cfi_adjust_cfa_offset(-48)
-       cfi_restore(14)
-       cfi_restore(13)
-       cfi_restore(12)
-
-11:    /* Canceled.  */
-       movq    $0xffffffffffffffff, %fs:RESULT
-       LOCK
-       orl     $0x10, %fs:CANCELHANDLING
-       movq    %fs:CLEANUP_JMP_BUF, %rdi
-       jmp     HIDDEN_JUMPTARGET (__pthread_unwind)
+       jmp     45b
+#endif
        cfi_endproc
        .size   sem_timedwait,.-sem_timedwait
+
+
+       .type   sem_timedwait_cleanup,@function
+sem_timedwait_cleanup:
+       cfi_startproc
+       cfi_adjust_cfa_offset(8)
+
+       movq    (%rsp), %rdi
+       LOCK
+       subq    $1, NWAITERS(%rdi)
+       movq    %rax, %rdi
+.LcallUR:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE:
+       cfi_endproc
+       .size   sem_timedwait_cleanup,.-sem_timedwait_cleanup
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       .type   sem_timedwait_cleanup2,@function
+sem_timedwait_cleanup2:
+       cfi_startproc
+       cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+       cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+       cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+       cfi_rel_offset(%r14, STACKFRAME)
+
+       LOCK
+       subq    $1, NWAITERS(%r12)
+       movq    %rax, %rdi
+       movq    STACKFRAME(%rsp), %r14
+       movq    STACKFRAME+8(%rsp), %r13
+       movq    STACKFRAME+16(%rsp), %r12
+.LcallUR2:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE2:
+       cfi_endproc
+       .size   sem_timedwait_cleanup2,.-sem_timedwait_cleanup2
+#endif
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   DW_EH_PE_omit                   # @LPStart format
+       .byte   DW_EH_PE_omit                   # @TType format
+       .byte   DW_EH_PE_uleb128                # call-site format
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 sem_timedwait_cleanup-.LSTARTCODE
+       .uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       .uleb128 .LcleanupSTART2-.LSTARTCODE
+       .uleb128 .LcleanupEND2-.LcleanupSTART2
+       .uleb128 sem_timedwait_cleanup2-.LSTARTCODE
+       .uleb128  0
+#endif
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+       .uleb128 .LcallUR2-.LSTARTCODE
+       .uleb128 .LENDCODE2-.LcallUR2
+       .uleb128 0
+       .uleb128  0
+#endif
+.Lcstend:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  8
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+       .quad   __gcc_personality_v0
+#endif
index 08edc39..7b7f63d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <pthread-errnos.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
        .text
 
        .globl  sem_trywait
@@ -36,7 +31,7 @@ sem_trywait:
 2:     testl   %eax, %eax
        jz      1f
 
-       leaq    -1(%rax), %rdx
+       leal    -1(%rax), %edx
        LOCK
        cmpxchgl %edx, (%rdi)
        jne     2b
index c2f94d4..73d1d16 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <structsem.h>
 
 
        .text
        .globl  sem_wait
        .type   sem_wait,@function
        .align  16
-       cfi_startproc
 sem_wait:
-       /* First check for cancellation.  */
-       movl    %fs:CANCELHANDLING, %eax
-       andl    $0xfffffff9, %eax
-       cmpl    $8, %eax
-       je      4f
-
-       pushq   %r12
-       cfi_adjust_cfa_offset(8)
-       cfi_offset(12, -16)
-       pushq   %r13
-       cfi_adjust_cfa_offset(8)
-       movq    %rdi, %r13
-       cfi_offset(13, -24)
+.LSTARTCODE:
+       cfi_startproc
+#ifdef SHARED
+       cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+                       DW.ref.__gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+       cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+       cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
 
-3:     movl    (%r13), %eax
+#if VALUE == 0
+       movl    (%rdi), %eax
+#else
+       movl    VALUE(%rdi), %eax
+#endif
 2:     testl   %eax, %eax
        je      1f
 
-       leaq    -1(%rax), %rdx
+       leal    -1(%rax), %edx
        LOCK
-       cmpxchgl %edx, (%r13)
+#if VALUE == 0
+       cmpxchgl %edx, (%rdi)
+#else
+       cmpxchgl %edx, VALUE(%rdi)
+#endif
        jne     2b
+
        xorl    %eax, %eax
+       retq
 
-       popq    %r13
-       cfi_adjust_cfa_offset(-8)
-       cfi_restore(13)
-       popq    %r12
-       cfi_adjust_cfa_offset(-8)
-       cfi_restore(12)
+       /* This push is only needed to store the sem_t pointer for the
+          exception handler.  */
+1:     pushq   %rdi
+       cfi_adjust_cfa_offset(8)
 
-       retq
+       LOCK
+       addq    $1, NWAITERS(%rdi)
 
-       cfi_adjust_cfa_offset(16)
-       cfi_offset(12, -16)
-       cfi_offset(13, -24)
-1:     call    __pthread_enable_asynccancel
+.LcleanupSTART:
+6:     call    __pthread_enable_asynccancel
        movl    %eax, %r8d
 
        xorq    %r10, %r10
        movl    $SYS_futex, %eax
-       movq    %r13, %rdi
-       movq    %r10, %rsi
-       movq    %r10, %rdx
+#if FUTEX_WAIT == 0
+       movl    PRIVATE(%rdi), %esi
+#else
+       movl    $FUTEX_WAIT, %esi
+       orl     PRIVATE(%rdi), %esi
+#endif
+       xorl    %edx, %edx
        syscall
-       movq    %rax, %r12
+       movq    %rax, %rcx
 
-       movl    %r8d, %edi
+       xchgq   %r8, %rdi
        call    __pthread_disable_asynccancel
+.LcleanupEND:
+       movq    %r8, %rdi
 
-       testq   %r12, %r12
-       je      3b
-       cmpq    $-EWOULDBLOCK, %r12
-       je      3b
-       negq    %r12
+       testq   %rcx, %rcx
+       je      3f
+       cmpq    $-EWOULDBLOCK, %rcx
+       jne     4f
+
+3:
+#if VALUE == 0
+       movl    (%rdi), %eax
+#else
+       movl    VALUE(%rdi), %eax
+#endif
+5:     testl   %eax, %eax
+       je      6b
+
+       leal    -1(%rax), %edx
+       LOCK
+#if VALUE == 0
+       cmpxchgl %edx, (%rdi)
+#else
+       cmpxchgl %edx, VALUE(%rdi)
+#endif
+       jne     5b
+
+       xorl    %eax, %eax
+
+9:     LOCK
+       subq    $1, NWAITERS(%rdi)
+
+       leaq    8(%rsp), %rsp
+       cfi_adjust_cfa_offset(-8)
+
+       retq
+
+       cfi_adjust_cfa_offset(8)
+4:     negq    %rcx
 #if USE___THREAD
        movq    errno@gottpoff(%rip), %rdx
-       movl    %r12d, %fs:(%rdx)
+       movl    %ecx, %fs:(%rdx)
 #else
+# error "not supported.  %rcx and %rdi must be preserved"
        callq   __errno_location@plt
-       movl    %r12d, (%rax)
+       movl    %ecx, (%rax)
 #endif
        orl     $-1, %eax
 
-       popq    %r13
-       cfi_adjust_cfa_offset(-8)
-       cfi_restore(13)
-       popq    %r12
-       cfi_adjust_cfa_offset(-8)
-       cfi_restore(12)
+       jmp 9b
+       .size   sem_wait,.-sem_wait
 
-       retq
 
-4:     /* Canceled.  */
-       movq    $0xffffffffffffffff, %fs:RESULT
+       .type   sem_wait_cleanup,@function
+sem_wait_cleanup:
+       movq    (%rsp), %rdi
        LOCK
-       orl     $0x10, %fs:CANCELHANDLING
-       movq    %fs:CLEANUP_JMP_BUF, %rdi
-       jmp     HIDDEN_JUMPTARGET (__pthread_unwind)
+       subq    $1, NWAITERS(%rdi)
+       movq    %rax, %rdi
+.LcallUR:
+       call    _Unwind_Resume@PLT
+       hlt
+.LENDCODE:
        cfi_endproc
-       .size   sem_wait,.-sem_wait
+       .size   sem_wait_cleanup,.-sem_wait_cleanup
+
+
+       .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+       .byte   DW_EH_PE_omit                   # @LPStart format
+       .byte   DW_EH_PE_omit                   # @TType format
+       .byte   DW_EH_PE_uleb128                # call-site format
+       .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+       .uleb128 .LcleanupSTART-.LSTARTCODE
+       .uleb128 .LcleanupEND-.LcleanupSTART
+       .uleb128 sem_wait_cleanup-.LSTARTCODE
+       .uleb128  0
+       .uleb128 .LcallUR-.LSTARTCODE
+       .uleb128 .LENDCODE-.LcallUR
+       .uleb128 0
+       .uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+       .hidden DW.ref.__gcc_personality_v0
+       .weak   DW.ref.__gcc_personality_v0
+       .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+       .align  8
+       .type   DW.ref.__gcc_personality_v0, @object
+       .size   DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+       .quad   __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/x86_64/Makefile b/libpthread/nptl/sysdeps/x86_64/Makefile
deleted file mode 100644 (file)
index 2f0d88f..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (C) 2002, 2003 Free Software Foundation, Inc.
-# This file is part of the GNU C Library.
-
-# The GNU C Library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-
-# The GNU C Library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with the GNU C Library; if not, write to the Free
-# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-# 02111-1307 USA.
-
-ifeq ($(subdir),csu)
-gen-as-const-headers += tcb-offsets.sym
-endif
-
-ifeq ($(subdir),nptl)
-CFLAGS-pthread_create.c += -mpreferred-stack-boundary=4
-CFLAGS-tst-align.c += -mpreferred-stack-boundary=4
-CFLAGS-tst-align2.c += -mpreferred-stack-boundary=4
-endif
index 8118d2d..cf86375 100644 (file)
@@ -10,3 +10,19 @@ CLEANUP                      offsetof (struct pthread, cleanup)
 CLEANUP_PREV           offsetof (struct _pthread_cleanup_buffer, __prev)
 MUTEX_FUTEX            offsetof (pthread_mutex_t, __data.__lock)
 MULTIPLE_THREADS_OFFSET        offsetof (tcbhead_t, multiple_threads)
+POINTER_GUARD          offsetof (tcbhead_t, pointer_guard)
+VGETCPU_CACHE_OFFSET   offsetof (tcbhead_t, vgetcpu_cache)
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX          offsetof (tcbhead_t, private_futex)
+#endif
+RTLD_SAVESPACE_SSE     offsetof (tcbhead_t, rtld_savespace_sse)
+
+-- Not strictly offsets, but these values are also used in the TCB.
+TCB_CANCELSTATE_BITMASK         CANCELSTATE_BITMASK
+TCB_CANCELTYPE_BITMASK  CANCELTYPE_BITMASK
+TCB_CANCELING_BITMASK   CANCELING_BITMASK
+TCB_CANCELED_BITMASK    CANCELED_BITMASK
+TCB_EXITING_BITMASK     EXITING_BITMASK
+TCB_CANCEL_RESTMASK     CANCEL_RESTMASK
+TCB_TERMINATED_BITMASK  TERMINATED_BITMASK
+TCB_PTHREAD_CANCELED    PTHREAD_CANCELED
index 7b40e3d..396ad42 100644 (file)
@@ -27,6 +27,7 @@
 # include <stdint.h>
 # include <stdlib.h>
 # include <sysdep.h>
+# include <bits/kernel-features.h>
 # include <bits/wordsize.h>
 # include <xmmintrin.h>
 
@@ -85,7 +86,7 @@ typedef struct
 #define HAVE_TLS_MODEL_ATTRIBUTE 1
 
 /* Signal that TLS support is available.  */
-#define USE_TLS        1
+#define USE_TLS        1
 
 /* Alignment requirement for the stack.  */
 #define STACK_ALIGN    16
index 56a4238..9a35695 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>
    and Richard Henderson <rth@redhat.com>, 2003.
@@ -23,7 +23,7 @@
 #include <string.h>
 #include <unistd.h>
 #include "pthreadP.h"
-#include "jmpbuf-unwind.h"
+#include <jmpbuf-unwind.h>
 
 #ifdef HAVE_FORCED_UNWIND
 
index c430673..1e1a3cf 100644 (file)
@@ -40,4 +40,4 @@ int __pthread_multiple_threads attribute_hidden;
 /* Table of the key information.  */
 struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX]
   __attribute__ ((nocommon));
-hidden_def (__pthread_keys)
+hidden_data_def (__pthread_keys)
index f2fd25f..b69556e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -23,7 +23,7 @@
 
 static const char banner[] =
 #include "banner.h"
-"Copyright (C) 2003 Free Software Foundation, Inc.\n\
+"Copyright (C) 2006 Free Software Foundation, Inc.\n\
 This is free software; see the source for copying conditions.\n\
 There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A\n\
 PARTICULAR PURPOSE.\n"
index 27d3f1f..f8bfdc2 100644 (file)
@@ -22,6 +22,11 @@ extern pthread_once_t __helper_once attribute_hidden;
 
 /* TID of the helper thread.  */
 extern pid_t __helper_tid attribute_hidden;
+
+/* List of active SIGEV_THREAD timers.  */
+extern struct timer *__active_timer_sigev_thread attribute_hidden;
+/* Lock for the __active_timer_sigev_thread.  */
+extern pthread_mutex_t __active_timer_sigev_thread_lock attribute_hidden;
 #endif
 
 /* Type of timers in the kernel */
@@ -47,4 +52,7 @@ struct timer {
 #ifdef __UCLIBC_HAS_THREADS__
     pthread_attr_t attr;
 #endif
+
+    /* Next element in list of active SIGEV_THREAD timers. */
+    struct timer *next;
 };