author     joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2012-03-07 19:31:25 +0000
committer  joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2012-03-07 19:31:25 +0000
commit     166cc47deaa1da2477806e3bb9a1c6bd0b94ff52 (patch)
tree       12c479e061d53a1ec8c9bf0451e84ac191323792 /libc/nptl
parent     68958b4bf50cfd014e72d20fbe187f4d1ddee22f (diff)
Merge changes between r17384 and r17457 from /fsf/trunk.
git-svn-id: svn://svn.eglibc.org/trunk@17458 7b3dc134-2b1b-0410-93df-9e9f96275f8d
Diffstat (limited to 'libc/nptl')
-rw-r--r--  libc/nptl/ChangeLog                                              |  33
-rw-r--r--  libc/nptl/Makefile                                               |   2
-rw-r--r--  libc/nptl/sysdeps/pthread/bits/libc-lock.h                       | 434
-rw-r--r--  libc/nptl/sysdeps/pthread/bits/libc-lockP.h                      | 472
-rw-r--r--  libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c                |   3
-rw-r--r--  libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_post.c               |   9
-rw-r--r--  libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_timedwait.c          |   3
-rw-r--r--  libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c  |   3
8 files changed, 515 insertions, 444 deletions
diff --git a/libc/nptl/ChangeLog b/libc/nptl/ChangeLog
index 23040e463..eb2e38daf 100644
--- a/libc/nptl/ChangeLog
+++ b/libc/nptl/ChangeLog
@@ -1,7 +1,32 @@
+2012-03-07 Ulrich Drepper <drepper@gmail.com>
+
+ * Makefile (distribute): Remove variable.
+
+2012-01-23 Thomas Schwinge <thomas@codesourcery.com>
+
+ * sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Get rid of
+ superfluous assignment.
+ * sysdeps/unix/sysv/linux/sparc/sem_timedwait.c (sem_timedwait):
+ Likewise.
+ * sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
+ (sem_timedwait): Likewise.
+
+2012-03-06 Ulrich Drepper <drepper@gmail.com>
+
+ * sysdeps/pthread/bits/libc-lock.h: Move information not needed in
+ installed headers to...
+ * sysdeps/pthread/bits/libc-lockP.h: ...here. New file.
+
+2012-03-06 David S. Miller <davem@davemloft.net>
+
+ * sysdeps/unix/sysv/linux/sparc/sem_post.c (__new_sem_post): Use
+ atomic_increment and remove unused local variable.
+ (__old_sem_post): Likewise.
+
2012-02-27 David S. Miller <davem@davemloft.net>
- * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Don't refer to non-existing
- __pthread_attr.
+ * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Don't refer to
+ non-existing __pthread_attr.
* sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h: Likewise.
* sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h: Likewise.
* sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h: Likewise.
@@ -42,8 +67,8 @@
2012-02-16 Richard Henderson <rth@twiddle.net>
- * nptl/sysdeps/unix/sysv/linux/s390/s390-32/pt-initfini.c: Remove file.
- * nptl/sysdeps/unix/sysv/linux/s390/s390-64/pt-initfini.c: Remove file.
+ * sysdeps/unix/sysv/linux/s390/s390-32/pt-initfini.c: Remove file.
+ * sysdeps/unix/sysv/linux/s390/s390-64/pt-initfini.c: Remove file.
2012-02-15 Kaz Kojima <kkojima@rr.iij4u.or.jp>
diff --git a/libc/nptl/Makefile b/libc/nptl/Makefile
index 238920a36..10144d2a8 100644
--- a/libc/nptl/Makefile
+++ b/libc/nptl/Makefile
@@ -282,8 +282,6 @@ tests-nolibpthread = tst-unload
# of the page size since every architecture's page size is > 1k.
tst-oddstacklimit-ENV = ; ulimit -s 1023;
-distribute = eintr.c tst-cleanup4aux.c
-
gen-as-const-headers = pthread-errnos.sym
LDFLAGS-pthread.so = -Wl,--enable-new-dtags,-z,nodelete,-z,initfirst
diff --git a/libc/nptl/sysdeps/pthread/bits/libc-lock.h b/libc/nptl/sysdeps/pthread/bits/libc-lock.h
index 9faa5281a..297cf207e 100644
--- a/libc/nptl/sysdeps/pthread/bits/libc-lock.h
+++ b/libc/nptl/sysdeps/pthread/bits/libc-lock.h
@@ -24,12 +24,6 @@
#include <stddef.h>
-/* Fortunately Linux now has a mean to do locking which is realtime
- safe without the aid of the thread library. We also need no fancy
- options like error checking mutexes etc. We only need simple
- locks, maybe recursive. This can be easily and cheaply implemented
- using futexes. We will use them everywhere except in ld.so since
- ld.so might be used on old kernels with a different libc.so. */
#ifdef _LIBC
# include <lowlevellock.h>
# include <tls.h>
@@ -41,27 +35,14 @@
/* Mutex type. */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
-typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
-typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
-typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
-# ifdef __USE_UNIX98
-typedef pthread_rwlock_t __libc_rwlock_t;
-# else
-typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
-# endif
#else
-typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
-typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif
-/* Type for key to thread-specific data. */
-typedef pthread_key_t __libc_key_t;
-
/* Define a lock variable NAME with storage class CLASS. The lock must be
initialized with __libc_lock_init before it can be used (or define it
with __libc_lock_define_initialized, below). Use `extern' for CLASS to
@@ -69,44 +50,8 @@ typedef pthread_key_t __libc_key_t;
definitions you must use a pointer to the lock structure (i.e., NAME
begins with a `*'), because its storage size will not be known outside
of libc. */
-#define __libc_lock_define(CLASS,NAME) \
- CLASS __libc_lock_t NAME;
-#define __libc_rwlock_define(CLASS,NAME) \
- CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME;
-#define __rtld_lock_define_recursive(CLASS,NAME) \
- CLASS __rtld_lock_recursive_t NAME;
-
-/* Define an initialized lock variable NAME with storage class CLASS.
-
- For the C library we take a deeper look at the initializer. For
- this implementation all fields are initialized to zero. Therefore
- we don't initialize the variable which allows putting it into the
- BSS section. (Except on PA-RISC and other odd architectures, where
- initialized locks must be set to one due to the lack of normal
- atomic operations.) */
-
-#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
-# if LLL_LOCK_INITIALIZER == 0
-# define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME;
-# else
-# define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
-# endif
-#else
-# if __LT_SPINLOCK_INIT == 0
-# define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME;
-# else
-# define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
-# endif
-#endif
-
-#define __libc_rwlock_define_initialized(CLASS,NAME) \
- CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
/* Define an initialized recursive lock variable NAME with storage
class CLASS. */
@@ -127,63 +72,7 @@ typedef pthread_key_t __libc_key_t;
{PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif
-#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
- CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
-#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
- {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
-
-#define __rtld_lock_initialize(NAME) \
- (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
-
-/* If we check for a weakly referenced symbol and then perform a
- normal jump to it te code generated for some platforms in case of
- PIC is unnecessarily slow. What would happen is that the function
- is first referenced as data and then it is called indirectly
- through the PLT. We can make this a direct jump. */
-#ifdef __PIC__
-# define __libc_maybe_call(FUNC, ARGS, ELSE) \
- (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
- _fn != NULL ? (*_fn) ARGS : ELSE; }))
-#else
-# define __libc_maybe_call(FUNC, ARGS, ELSE) \
- (FUNC != NULL ? FUNC ARGS : ELSE)
-#endif
-
-/* Call thread functions through the function pointer table. */
-#if defined SHARED && !defined NOT_IN_libc
-# define PTFAVAIL(NAME) __libc_pthread_functions_init
-# define __libc_ptf_call(FUNC, ARGS, ELSE) \
- (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
-# define __libc_ptf_call_always(FUNC, ARGS) \
- PTHFCT_CALL (ptr_##FUNC, ARGS)
-#else
-# define PTFAVAIL(NAME) (NAME != NULL)
-# define __libc_ptf_call(FUNC, ARGS, ELSE) \
- __libc_maybe_call (FUNC, ARGS, ELSE)
-# define __libc_ptf_call_always(FUNC, ARGS) \
- FUNC ARGS
-#endif
-
-
-/* Initialize the named lock variable, leaving it in a consistent, unlocked
- state. */
-#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
-# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
-#else
-# define __libc_lock_init(NAME) \
- __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
-#endif
-#if defined SHARED && !defined NOT_IN_libc
-/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
- inefficient. */
-# define __libc_rwlock_init(NAME) \
- (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
-#else
-# define __libc_rwlock_init(NAME) \
- __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
-#endif
-
-/* Same as last but this time we initialize a recursive mutex. */
+/* Initialize a recursive mutex. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
@@ -201,34 +90,6 @@ typedef pthread_key_t __libc_key_t;
} while (0)
#endif
-#define __rtld_lock_init_recursive(NAME) \
- do { \
- if (__pthread_mutex_init != NULL) \
- { \
- pthread_mutexattr_t __attr; \
- __pthread_mutexattr_init (&__attr); \
- __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
- __pthread_mutex_init (&(NAME).mutex, &__attr); \
- __pthread_mutexattr_destroy (&__attr); \
- } \
- } while (0)
-
-/* Finalize the named lock variable, which must be locked. It cannot be
- used again until __libc_lock_init is called again on it. This must be
- called on a lock variable before the containing storage is reused. */
-#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
-# define __libc_lock_fini(NAME) ((void) 0)
-#else
-# define __libc_lock_fini(NAME) \
- __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
-#endif
-#if defined SHARED && !defined NOT_IN_libc
-# define __libc_rwlock_fini(NAME) ((void) 0)
-#else
-# define __libc_rwlock_fini(NAME) \
- __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
-#endif
-
/* Finalize recursive named lock. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
@@ -237,31 +98,6 @@ typedef pthread_key_t __libc_key_t;
__libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
-/* Lock the named lock variable. */
-#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
-# if __OPTION_EGLIBC_BIG_MACROS != 1
-/* EGLIBC: Declare wrapper function for a big macro if either
- !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
- small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2). */
-extern void __libc_lock_lock_fn (__libc_lock_t *);
-libc_hidden_proto (__libc_lock_lock_fn);
-# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
-# if __OPTION_EGLIBC_BIG_MACROS
-# define __libc_lock_lock(NAME) \
- ({ lll_lock (NAME, LLL_PRIVATE); 0; })
-# else
-# define __libc_lock_lock(NAME) \
- __libc_lock_lock_fn (&(NAME))
-# endif /* __OPTION_EGLIBC_BIG_MACROS */
-#else
-# define __libc_lock_lock(NAME) \
- __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
-#endif
-#define __libc_rwlock_rdlock(NAME) \
- __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
-#define __libc_rwlock_wrlock(NAME) \
- __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
-
/* Lock the recursive named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
@@ -291,31 +127,6 @@ libc_hidden_proto (__libc_lock_lock_recursive_fn);
__libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif
-/* Try to lock the named lock variable. */
-#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
-# if __OPTION_EGLIBC_BIG_MACROS != 1
-/* EGLIBC: Declare wrapper function for a big macro if either
- !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
- small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2). */
-extern int __libc_lock_trylock_fn (__libc_lock_t *);
-libc_hidden_proto (__libc_lock_trylock_fn);
-# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
-# if __OPTION_EGLIBC_BIG_MACROS
-# define __libc_lock_trylock(NAME) \
- lll_trylock (NAME)
-# else
-# define __libc_lock_trylock(NAME) \
- __libc_lock_trylock_fn (&(NAME))
-# endif /* __OPTION_EGLIBC_BIG_MACROS */
-#else
-# define __libc_lock_trylock(NAME) \
- __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
-#endif
-#define __libc_rwlock_tryrdlock(NAME) \
- __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
-#define __libc_rwlock_trywrlock(NAME) \
- __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
-
/* Try to lock the recursive named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
@@ -353,32 +164,6 @@ libc_hidden_proto (__libc_lock_trylock_recursive_fn);
__libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
-#define __rtld_lock_trylock_recursive(NAME) \
- __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
-
-/* Unlock the named lock variable. */
-#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
-# if __OPTION_EGLIBC_BIG_MACROS != 1
-/* EGLIBC: Declare wrapper function for a big macro if either
- !__OPTION_EGLIBC_BIG_MACROS, or we are using a back door from
- small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2). */
-extern void __libc_lock_unlock_fn (__libc_lock_t *);
-libc_hidden_proto (__libc_lock_unlock_fn);
-# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
-# if __OPTION_EGLIBC_BIG_MACROS
-# define __libc_lock_unlock(NAME) \
- lll_unlock (NAME, LLL_PRIVATE)
-# else
-# define __libc_lock_unlock(NAME) \
- __libc_lock_unlock_fn (&(NAME))
-# endif /* __OPTION_EGLIBC_BIG_MACROS */
-#else
-# define __libc_lock_unlock(NAME) \
- __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
-#endif
-#define __libc_rwlock_unlock(NAME) \
- __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
-
/* Unlock the recursive named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if __OPTION_EGLIBC_BIG_MACROS != 1
@@ -407,58 +192,10 @@ libc_hidden_proto (__libc_lock_unlock_recursive_fn);
__libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
-#if defined _LIBC && defined SHARED
-# define __rtld_lock_default_lock_recursive(lock) \
- ++((pthread_mutex_t *)(lock))->__data.__count;
-
-# define __rtld_lock_default_unlock_recursive(lock) \
- --((pthread_mutex_t *)(lock))->__data.__count;
-
-# define __rtld_lock_lock_recursive(NAME) \
- GL(dl_rtld_lock_recursive) (&(NAME).mutex)
-
-# define __rtld_lock_unlock_recursive(NAME) \
- GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
-#else
-# define __rtld_lock_lock_recursive(NAME) \
- __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
-
-# define __rtld_lock_unlock_recursive(NAME) \
- __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
-#endif
-
-/* Define once control variable. */
-#if PTHREAD_ONCE_INIT == 0
-/* Special case for static variables where we can avoid the initialization
- if it is zero. */
-# define __libc_once_define(CLASS, NAME) \
- CLASS pthread_once_t NAME
-#else
-# define __libc_once_define(CLASS, NAME) \
- CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
-#endif
-
-/* Call handler iff the first call. */
-#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
- do { \
- if (PTFAVAIL (__pthread_once)) \
- __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
- INIT_FUNCTION)); \
- else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
- INIT_FUNCTION (); \
- (ONCE_CONTROL) |= 2; \
- } \
- } while (0)
-
-
/* Note that for I/O cleanup handling we are using the old-style
- cancel handling. It does not have to be integrated with C++ snce
+ cancel handling. It does not have to be integrated with C++ since
no C++ code is called in the middle. The old-style handling is
faster and the support is not going away. */
-extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
- void (*routine) (void *), void *arg);
-extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
- int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
@@ -489,170 +226,11 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
_buffer.__routine (_buffer.__arg); \
}
-/* Sometimes we have to exit the block in the middle. */
-#define __libc_cleanup_end(DOIT) \
- if (_avail) { \
- __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
- } else if (DOIT) \
- _buffer.__routine (_buffer.__arg)
-
-
-/* Normal cleanup handling, based on C cleanup attribute. */
-__extern_inline void
-__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
-{
- if (f->__do_it)
- f->__cancel_routine (f->__cancel_arg);
-}
-
-#define __libc_cleanup_push(fct, arg) \
- do { \
- struct __pthread_cleanup_frame __clframe \
- __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
- = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
- .__do_it = 1 };
-
-#define __libc_cleanup_pop(execute) \
- __clframe.__do_it = (execute); \
- } while (0)
-
-
-/* Create thread-specific key. */
-#define __libc_key_create(KEY, DESTRUCTOR) \
- __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
-
-/* Get thread-specific data. */
-#define __libc_getspecific(KEY) \
- __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
-
-/* Set thread-specific data. */
-#define __libc_setspecific(KEY, VALUE) \
- __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
-
-
-/* Register handlers to execute before and after `fork'. Note that the
- last parameter is NULL. The handlers registered by the libc are
- never removed so this is OK. */
-#define __libc_atfork(PREPARE, PARENT, CHILD) \
- __register_atfork (PREPARE, PARENT, CHILD, NULL)
-extern int __register_atfork (void (*__prepare) (void),
- void (*__parent) (void),
- void (*__child) (void),
- void *__dso_handle);
-
-/* Functions that are used by this file and are internal to the GNU C
- library. */
-
-extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
- const pthread_mutexattr_t *__mutex_attr);
-
-extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
-
-extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
-
-extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
-
-extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
-
-extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
-
-extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
-
-extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
- int __kind);
-#ifdef __USE_UNIX98
-extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
- const pthread_rwlockattr_t *__attr);
-
-extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
-
-extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
-
-extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
-
-extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
-
-extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
-
-extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
-#endif
-
-extern int __pthread_key_create (pthread_key_t *__key,
- void (*__destr_function) (void *));
-
-extern int __pthread_setspecific (pthread_key_t __key,
- const void *__pointer);
-
-extern void *__pthread_getspecific (pthread_key_t __key);
-
-extern int __pthread_once (pthread_once_t *__once_control,
- void (*__init_routine) (void));
-
-extern int __pthread_atfork (void (*__prepare) (void),
- void (*__parent) (void),
- void (*__child) (void));
-
-
-
-/* Make the pthread functions weak so that we can elide them from
- single-threaded processes. */
-#ifndef __NO_WEAK_PTHREAD_ALIASES
-# ifdef weak_extern
-# if _LIBC
-# include <bp-sym.h>
-# else
-# define BP_SYM(sym) sym
-# endif
-weak_extern (BP_SYM (__pthread_mutex_init))
-weak_extern (BP_SYM (__pthread_mutex_destroy))
-weak_extern (BP_SYM (__pthread_mutex_lock))
-weak_extern (BP_SYM (__pthread_mutex_trylock))
-weak_extern (BP_SYM (__pthread_mutex_unlock))
-weak_extern (BP_SYM (__pthread_mutexattr_init))
-weak_extern (BP_SYM (__pthread_mutexattr_destroy))
-weak_extern (BP_SYM (__pthread_mutexattr_settype))
-weak_extern (BP_SYM (__pthread_rwlock_init))
-weak_extern (BP_SYM (__pthread_rwlock_destroy))
-weak_extern (BP_SYM (__pthread_rwlock_rdlock))
-weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
-weak_extern (BP_SYM (__pthread_rwlock_wrlock))
-weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
-weak_extern (BP_SYM (__pthread_rwlock_unlock))
-weak_extern (BP_SYM (__pthread_key_create))
-weak_extern (BP_SYM (__pthread_setspecific))
-weak_extern (BP_SYM (__pthread_getspecific))
-weak_extern (BP_SYM (__pthread_once))
-weak_extern (__pthread_initialize)
-weak_extern (__pthread_atfork)
-weak_extern (BP_SYM (_pthread_cleanup_push_defer))
-weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
-weak_extern (BP_SYM (pthread_setcancelstate))
-# else
-# pragma weak __pthread_mutex_init
-# pragma weak __pthread_mutex_destroy
-# pragma weak __pthread_mutex_lock
-# pragma weak __pthread_mutex_trylock
-# pragma weak __pthread_mutex_unlock
-# pragma weak __pthread_mutexattr_init
-# pragma weak __pthread_mutexattr_destroy
-# pragma weak __pthread_mutexattr_settype
-# pragma weak __pthread_rwlock_destroy
-# pragma weak __pthread_rwlock_rdlock
-# pragma weak __pthread_rwlock_tryrdlock
-# pragma weak __pthread_rwlock_wrlock
-# pragma weak __pthread_rwlock_trywrlock
-# pragma weak __pthread_rwlock_unlock
-# pragma weak __pthread_key_create
-# pragma weak __pthread_setspecific
-# pragma weak __pthread_getspecific
-# pragma weak __pthread_once
-# pragma weak __pthread_initialize
-# pragma weak __pthread_atfork
-# pragma weak _pthread_cleanup_push_defer
-# pragma weak _pthread_cleanup_pop_restore
-# pragma weak pthread_setcancelstate
-# endif
+/* Hide the definitions which are only supposed to be used inside libc in
+ a separate file. This file is not present in the installation! */
+#ifdef _LIBC
+# include "libc-lockP.h"
#endif
#endif /* bits/libc-lock.h */
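The hunks above strip bits/libc-lock.h, which is installed on users' systems, down to the public lock types and macros; everything libc-internal moves to the new, uninstalled bits/libc-lockP.h, pulled in only when _LIBC is defined. A generic sketch of this installed-header/private-header split, with hypothetical names rather than the actual glibc contents:

/* example-lock.h -- installed header: only what user code may rely on.  */
#ifndef _EXAMPLE_LOCK_H
#define _EXAMPLE_LOCK_H 1

typedef struct example_lock_opaque *example_lock_t;

/* Implementation details live in a separate header that is never
   installed; it is included only while building the library itself.  */
#ifdef _LIBC
# include "example-lockP.h"
#endif

#endif /* example-lock.h */

/* example-lockP.h -- private header: internal representation and the
   fast-path helpers that must not leak into the installed interface.  */
#ifndef _EXAMPLE_LOCKP_H
#define _EXAMPLE_LOCKP_H 1

struct example_lock_opaque { int futex; };
#define example_lock_init(l) ((l)->futex = 0)

#endif /* example-lockP.h */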
diff --git a/libc/nptl/sysdeps/pthread/bits/libc-lockP.h b/libc/nptl/sysdeps/pthread/bits/libc-lockP.h
new file mode 100644
index 000000000..55c95af2c
--- /dev/null
+++ b/libc/nptl/sysdeps/pthread/bits/libc-lockP.h
@@ -0,0 +1,472 @@
+/* Private libc-internal interface for mutex locks. NPTL version.
+ Copyright (C) 1996-2003, 2005, 2007, 2012 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef _BITS_LIBC_LOCKP_H
+#define _BITS_LIBC_LOCKP_H 1
+
+#include <pthread.h>
+#define __need_NULL
+#include <stddef.h>
+
+
+/* Fortunately Linux now has a mean to do locking which is realtime
+ safe without the aid of the thread library. We also need no fancy
+ options like error checking mutexes etc. We only need simple
+ locks, maybe recursive. This can be easily and cheaply implemented
+ using futexes. We will use them everywhere except in ld.so since
+ ld.so might be used on old kernels with a different libc.so. */
+#include <lowlevellock.h>
+#include <tls.h>
+#include <pthread-functions.h>
+#include <errno.h> /* For EBUSY. */
+#include <gnu/option-groups.h> /* For __OPTION_EGLIBC_BIG_MACROS. */
+
+/* Mutex type. */
+#if defined NOT_IN_libc && !defined IS_IN_libpthread
+typedef pthread_mutex_t __libc_lock_t;
+#else
+typedef int __libc_lock_t;
+#endif
+typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
+typedef pthread_rwlock_t __libc_rwlock_t;
+
+/* Type for key to thread-specific data. */
+typedef pthread_key_t __libc_key_t;
+
+/* Define a lock variable NAME with storage class CLASS. The lock must be
+ initialized with __libc_lock_init before it can be used (or define it
+ with __libc_lock_define_initialized, below). Use `extern' for CLASS to
+ declare a lock defined in another module. In public structure
+ definitions you must use a pointer to the lock structure (i.e., NAME
+ begins with a `*'), because its storage size will not be known outside
+ of libc. */
+#define __libc_lock_define(CLASS,NAME) \
+ CLASS __libc_lock_t NAME;
+#define __libc_rwlock_define(CLASS,NAME) \
+ CLASS __libc_rwlock_t NAME;
+#define __rtld_lock_define_recursive(CLASS,NAME) \
+ CLASS __rtld_lock_recursive_t NAME;
+
+/* Define an initialized lock variable NAME with storage class CLASS.
+
+ For the C library we take a deeper look at the initializer. For
+ this implementation all fields are initialized to zero. Therefore
+ we don't initialize the variable which allows putting it into the
+ BSS section. (Except on PA-RISC and other odd architectures, where
+ initialized locks must be set to one due to the lack of normal
+ atomic operations.) */
+
+#if !defined NOT_IN_libc || defined IS_IN_libpthread
+# if LLL_LOCK_INITIALIZER == 0
+# define __libc_lock_define_initialized(CLASS,NAME) \
+ CLASS __libc_lock_t NAME;
+# else
+# define __libc_lock_define_initialized(CLASS,NAME) \
+ CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
+# endif
+#else
+# if __LT_SPINLOCK_INIT == 0
+# define __libc_lock_define_initialized(CLASS,NAME) \
+ CLASS __libc_lock_t NAME;
+# else
+# define __libc_lock_define_initialized(CLASS,NAME) \
+ CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
+# endif
+#endif
+
+#define __libc_rwlock_define_initialized(CLASS,NAME) \
+ CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
+
+#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
+ CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
+#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
+ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
+
+#define __rtld_lock_initialize(NAME) \
+ (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
+
+/* If we check for a weakly referenced symbol and then perform a
+ normal jump to it te code generated for some platforms in case of
+ PIC is unnecessarily slow. What would happen is that the function
+ is first referenced as data and then it is called indirectly
+ through the PLT. We can make this a direct jump. */
+#ifdef __PIC__
+# define __libc_maybe_call(FUNC, ARGS, ELSE) \
+ (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
+ _fn != NULL ? (*_fn) ARGS : ELSE; }))
+#else
+# define __libc_maybe_call(FUNC, ARGS, ELSE) \
+ (FUNC != NULL ? FUNC ARGS : ELSE)
+#endif
+
+/* Call thread functions through the function pointer table. */
+#if defined SHARED && !defined NOT_IN_libc
+# define PTFAVAIL(NAME) __libc_pthread_functions_init
+# define __libc_ptf_call(FUNC, ARGS, ELSE) \
+ (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
+# define __libc_ptf_call_always(FUNC, ARGS) \
+ PTHFCT_CALL (ptr_##FUNC, ARGS)
+#else
+# define PTFAVAIL(NAME) (NAME != NULL)
+# define __libc_ptf_call(FUNC, ARGS, ELSE) \
+ __libc_maybe_call (FUNC, ARGS, ELSE)
+# define __libc_ptf_call_always(FUNC, ARGS) \
+ FUNC ARGS
+#endif
+
+
+/* Initialize the named lock variable, leaving it in a consistent, unlocked
+ state. */
+#if !defined NOT_IN_libc || defined IS_IN_libpthread
+# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
+#else
+# define __libc_lock_init(NAME) \
+ __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
+#endif
+#if defined SHARED && !defined NOT_IN_libc
+/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
+ inefficient. */
+# define __libc_rwlock_init(NAME) \
+ (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
+#else
+# define __libc_rwlock_init(NAME) \
+ __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
+#endif
+
+#define __rtld_lock_init_recursive(NAME) \
+ do { \
+ if (__pthread_mutex_init != NULL) \
+ { \
+ pthread_mutexattr_t __attr; \
+ __pthread_mutexattr_init (&__attr); \
+ __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
+ __pthread_mutex_init (&(NAME).mutex, &__attr); \
+ __pthread_mutexattr_destroy (&__attr); \
+ } \
+ } while (0)
+
+/* Finalize the named lock variable, which must be locked. It cannot be
+ used again until __libc_lock_init is called again on it. This must be
+ called on a lock variable before the containing storage is reused. */
+#if !defined NOT_IN_libc || defined IS_IN_libpthread
+# define __libc_lock_fini(NAME) ((void) 0)
+#else
+# define __libc_lock_fini(NAME) \
+ __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
+#endif
+#if defined SHARED && !defined NOT_IN_libc
+# define __libc_rwlock_fini(NAME) ((void) 0)
+#else
+# define __libc_rwlock_fini(NAME) \
+ __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
+#endif
+
+/* Lock the named lock variable. */
+#if !defined NOT_IN_libc || defined IS_IN_libpthread
+# if __OPTION_EGLIBC_BIG_MACROS != 1
+/* EGLIBC: Declare wrapper function for a big macro if either
+ !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
+ small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2). */
+extern void __libc_lock_lock_fn (__libc_lock_t *);
+libc_hidden_proto (__libc_lock_lock_fn);
+# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
+# if __OPTION_EGLIBC_BIG_MACROS
+# define __libc_lock_lock(NAME) \
+ ({ lll_lock (NAME, LLL_PRIVATE); 0; })
+# else
+# define __libc_lock_lock(NAME) \
+ __libc_lock_lock_fn (&(NAME))
+# endif /* __OPTION_EGLIBC_BIG_MACROS */
+#else
+# define __libc_lock_lock(NAME) \
+ __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
+#endif
+#define __libc_rwlock_rdlock(NAME) \
+ __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
+#define __libc_rwlock_wrlock(NAME) \
+ __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
+
+/* Try to lock the named lock variable. */
+#if !defined NOT_IN_libc || defined IS_IN_libpthread
+# if __OPTION_EGLIBC_BIG_MACROS != 1
+/* EGLIBC: Declare wrapper function for a big macro if either
+ !__OPTION_EGLIBC_BIG_MACROS or we are using a back door from
+ small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2). */
+extern int __libc_lock_trylock_fn (__libc_lock_t *);
+libc_hidden_proto (__libc_lock_trylock_fn);
+# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
+# if __OPTION_EGLIBC_BIG_MACROS
+# define __libc_lock_trylock(NAME) \
+ lll_trylock (NAME)
+# else
+# define __libc_lock_trylock(NAME) \
+ __libc_lock_trylock_fn (&(NAME))
+# endif /* __OPTION_EGLIBC_BIG_MACROS */
+#else
+# define __libc_lock_trylock(NAME) \
+ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
+#endif
+#define __libc_rwlock_tryrdlock(NAME) \
+ __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
+#define __libc_rwlock_trywrlock(NAME) \
+ __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
+
+#define __rtld_lock_trylock_recursive(NAME) \
+ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
+
+/* Unlock the named lock variable. */
+#if !defined NOT_IN_libc || defined IS_IN_libpthread
+# if __OPTION_EGLIBC_BIG_MACROS != 1
+/* EGLIBC: Declare wrapper function for a big macro if either
+ !__OPTION_EGLIBC_BIG_MACROS, or we are using a back door from
+ small-macros-fns.c (__OPTION_EGLIBC_BIG_MACROS == 2). */
+extern void __libc_lock_unlock_fn (__libc_lock_t *);
+libc_hidden_proto (__libc_lock_unlock_fn);
+# endif /* __OPTION_EGLIBC_BIG_MACROS != 1 */
+# if __OPTION_EGLIBC_BIG_MACROS
+# define __libc_lock_unlock(NAME) \
+ lll_unlock (NAME, LLL_PRIVATE)
+# else
+# define __libc_lock_unlock(NAME) \
+ __libc_lock_unlock_fn (&(NAME))
+# endif /* __OPTION_EGLIBC_BIG_MACROS */
+#else
+# define __libc_lock_unlock(NAME) \
+ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
+#endif
+#define __libc_rwlock_unlock(NAME) \
+ __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
+
+#ifdef SHARED
+# define __rtld_lock_default_lock_recursive(lock) \
+ ++((pthread_mutex_t *)(lock))->__data.__count;
+
+# define __rtld_lock_default_unlock_recursive(lock) \
+ --((pthread_mutex_t *)(lock))->__data.__count;
+
+# define __rtld_lock_lock_recursive(NAME) \
+ GL(dl_rtld_lock_recursive) (&(NAME).mutex)
+
+# define __rtld_lock_unlock_recursive(NAME) \
+ GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
+#else
+# define __rtld_lock_lock_recursive(NAME) \
+ __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
+
+# define __rtld_lock_unlock_recursive(NAME) \
+ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
+#endif
+
+/* Define once control variable. */
+#if PTHREAD_ONCE_INIT == 0
+/* Special case for static variables where we can avoid the initialization
+ if it is zero. */
+# define __libc_once_define(CLASS, NAME) \
+ CLASS pthread_once_t NAME
+#else
+# define __libc_once_define(CLASS, NAME) \
+ CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
+#endif
+
+/* Call handler iff the first call. */
+#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
+ do { \
+ if (PTFAVAIL (__pthread_once)) \
+ __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
+ INIT_FUNCTION)); \
+ else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
+ INIT_FUNCTION (); \
+ (ONCE_CONTROL) |= 2; \
+ } \
+ } while (0)
+
+
+/* Note that for I/O cleanup handling we are using the old-style
+ cancel handling. It does not have to be integrated with C++ snce
+ no C++ code is called in the middle. The old-style handling is
+ faster and the support is not going away. */
+extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+
+/* Sometimes we have to exit the block in the middle. */
+#define __libc_cleanup_end(DOIT) \
+ if (_avail) { \
+ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
+ } else if (DOIT) \
+ _buffer.__routine (_buffer.__arg)
+
+
+/* Normal cleanup handling, based on C cleanup attribute. */
+__extern_inline void
+__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
+{
+ if (f->__do_it)
+ f->__cancel_routine (f->__cancel_arg);
+}
+
+#define __libc_cleanup_push(fct, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
+ = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
+ .__do_it = 1 };
+
+#define __libc_cleanup_pop(execute) \
+ __clframe.__do_it = (execute); \
+ } while (0)
+
+
+/* Create thread-specific key. */
+#define __libc_key_create(KEY, DESTRUCTOR) \
+ __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
+
+/* Get thread-specific data. */
+#define __libc_getspecific(KEY) \
+ __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
+
+/* Set thread-specific data. */
+#define __libc_setspecific(KEY, VALUE) \
+ __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
+
+
+/* Register handlers to execute before and after `fork'. Note that the
+ last parameter is NULL. The handlers registered by the libc are
+ never removed so this is OK. */
+#define __libc_atfork(PREPARE, PARENT, CHILD) \
+ __register_atfork (PREPARE, PARENT, CHILD, NULL)
+extern int __register_atfork (void (*__prepare) (void),
+ void (*__parent) (void),
+ void (*__child) (void),
+ void *__dso_handle);
+
+/* Functions that are used by this file and are internal to the GNU C
+ library. */
+
+extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
+ const pthread_mutexattr_t *__mutex_attr);
+
+extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
+
+extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
+
+extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
+ int __kind);
+
+extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
+ const pthread_rwlockattr_t *__attr);
+
+extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_key_create (pthread_key_t *__key,
+ void (*__destr_function) (void *));
+
+extern int __pthread_setspecific (pthread_key_t __key,
+ const void *__pointer);
+
+extern void *__pthread_getspecific (pthread_key_t __key);
+
+extern int __pthread_once (pthread_once_t *__once_control,
+ void (*__init_routine) (void));
+
+extern int __pthread_atfork (void (*__prepare) (void),
+ void (*__parent) (void),
+ void (*__child) (void));
+
+
+
+/* Make the pthread functions weak so that we can elide them from
+ single-threaded processes. */
+#ifndef __NO_WEAK_PTHREAD_ALIASES
+# ifdef weak_extern
+# include <bp-sym.h>
+weak_extern (BP_SYM (__pthread_mutex_init))
+weak_extern (BP_SYM (__pthread_mutex_destroy))
+weak_extern (BP_SYM (__pthread_mutex_lock))
+weak_extern (BP_SYM (__pthread_mutex_trylock))
+weak_extern (BP_SYM (__pthread_mutex_unlock))
+weak_extern (BP_SYM (__pthread_mutexattr_init))
+weak_extern (BP_SYM (__pthread_mutexattr_destroy))
+weak_extern (BP_SYM (__pthread_mutexattr_settype))
+weak_extern (BP_SYM (__pthread_rwlock_init))
+weak_extern (BP_SYM (__pthread_rwlock_destroy))
+weak_extern (BP_SYM (__pthread_rwlock_rdlock))
+weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
+weak_extern (BP_SYM (__pthread_rwlock_wrlock))
+weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
+weak_extern (BP_SYM (__pthread_rwlock_unlock))
+weak_extern (BP_SYM (__pthread_key_create))
+weak_extern (BP_SYM (__pthread_setspecific))
+weak_extern (BP_SYM (__pthread_getspecific))
+weak_extern (BP_SYM (__pthread_once))
+weak_extern (__pthread_initialize)
+weak_extern (__pthread_atfork)
+weak_extern (BP_SYM (_pthread_cleanup_push_defer))
+weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
+weak_extern (BP_SYM (pthread_setcancelstate))
+# else
+# pragma weak __pthread_mutex_init
+# pragma weak __pthread_mutex_destroy
+# pragma weak __pthread_mutex_lock
+# pragma weak __pthread_mutex_trylock
+# pragma weak __pthread_mutex_unlock
+# pragma weak __pthread_mutexattr_init
+# pragma weak __pthread_mutexattr_destroy
+# pragma weak __pthread_mutexattr_settype
+# pragma weak __pthread_rwlock_destroy
+# pragma weak __pthread_rwlock_rdlock
+# pragma weak __pthread_rwlock_tryrdlock
+# pragma weak __pthread_rwlock_wrlock
+# pragma weak __pthread_rwlock_trywrlock
+# pragma weak __pthread_rwlock_unlock
+# pragma weak __pthread_key_create
+# pragma weak __pthread_setspecific
+# pragma weak __pthread_getspecific
+# pragma weak __pthread_once
+# pragma weak __pthread_initialize
+# pragma weak __pthread_atfork
+# pragma weak _pthread_cleanup_push_defer
+# pragma weak _pthread_cleanup_pop_restore
+# pragma weak pthread_setcancelstate
+# endif
+#endif
+
+#endif /* bits/libc-lockP.h */
diff --git a/libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c b/libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
index 903830ea7..36e00426d 100644
--- a/libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
+++ b/libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
@@ -1,5 +1,5 @@
/* sem_timedwait -- wait on a semaphore. Generic futex-using version.
- Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2007, 2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -83,7 +83,6 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
}
/* Already timed out? */
- err = -ETIMEDOUT;
if (sec < 0)
{
__set_errno (ETIMEDOUT);
diff --git a/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_post.c b/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_post.c
index 0b829927d..28e06f632 100644
--- a/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_post.c
+++ b/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_post.c
@@ -30,7 +30,7 @@ __new_sem_post (sem_t *sem)
{
struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
- int nr = atomic_increment_val (&isem->value);
+ atomic_increment (&isem->value);
atomic_full_barrier ();
if (isem->nwaiters > 0)
{
@@ -53,10 +53,11 @@ attribute_compat_text_section
__old_sem_post (sem_t *sem)
{
struct sparc_old_sem *isem = (struct sparc_old_sem *) sem;
+ int err;
- int nr = atomic_increment_val (&isem->value);
- int err = lll_futex_wake (&isem->value, 1,
- isem->private ^ FUTEX_PRIVATE_FLAG);
+ atomic_increment (&isem->value);
+ err = lll_futex_wake (&isem->value, 1,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);
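The sem_post hunks above replace atomic_increment_val, whose result was stored in a local variable that was never read, with atomic_increment, which discards it. A stand-alone sketch of the same clean-up, using GCC __atomic builtins as stand-ins for glibc's internal <atomic.h> macros (the demo_* names are hypothetical):

#include <stdio.h>

/* Illustrative stand-ins for atomic_increment_val / atomic_increment.  */
#define demo_atomic_increment_val(mem) \
  __atomic_add_fetch ((mem), 1, __ATOMIC_SEQ_CST)
#define demo_atomic_increment(mem) \
  ((void) __atomic_add_fetch ((mem), 1, __ATOMIC_SEQ_CST))

static int value;

int
main (void)
{
  /* Before: fetch the incremented value even though it is never used.  */
  int nr = demo_atomic_increment_val (&value);
  (void) nr;

  /* After: plain increment, no dead local variable.  */
  demo_atomic_increment (&value);

  printf ("value is now %d\n", value);
  return 0;
}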
diff --git a/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_timedwait.c b/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_timedwait.c
index 7b4e8241a..8e56bc9c1 100644
--- a/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_timedwait.c
+++ b/libc/nptl/sysdeps/unix/sysv/linux/sparc/sem_timedwait.c
@@ -1,5 +1,5 @@
/* sem_timedwait -- wait on a semaphore. Generic futex-using version.
- Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2007, 2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -83,7 +83,6 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
}
/* Already timed out? */
- err = -ETIMEDOUT;
if (sec < 0)
{
__set_errno (ETIMEDOUT);
diff --git a/libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c b/libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
index 5da15742c..c9fcd595f 100644
--- a/libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
+++ b/libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
@@ -1,5 +1,5 @@
/* sem_timedwait -- wait on a semaphore. SPARC version.
- Copyright (C) 2003, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2006, 2007, 2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -102,7 +102,6 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
}
/* Already timed out? */
- err = -ETIMEDOUT;
if (sec < 0)
{
__set_errno (ETIMEDOUT);
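All three sem_timedwait hunks drop the same dead store: err was set to -ETIMEDOUT immediately before a branch that reports the timeout itself and returns, so the assigned value was never read. A minimal sketch of the pattern with a hypothetical helper (not the glibc sources):

#include <errno.h>

/* Hypothetical timeout check illustrating the removed assignment.  */
int
demo_check_timeout (long sec)
{
  /* The clean-up deletes an "err = -ETIMEDOUT;" that used to sit here;
     the branch below sets errno directly, so the store was dead.  */
  if (sec < 0)
    {
      errno = ETIMEDOUT;
      return -1;
    }
  return 0;
}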