+2003-03-11 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
+ (__condvar_cleanup): Wake up all waiters in case we got signaled
+ after being woken up but before disabling asynchronous
+ cancellation.
+ * sysdeps/pthread/pthread_cond_wait.c (__condvar_cleanup): Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+ (__condvar_cleanup): Likewise.
+
+ * init.c (__NR_set_tid_address): If already defined, don't redefine.
+ Make it an error if architecture has no #if case. Add x86-64.
+
+ * sysdeps/unix/sysv/linux/x86_64/Makefile: Add flags for
+ pt-initfini.s generation.
+
+ * sysdeps/x86_64/tls.h: Include <asm/prctl.h>.
+ (TLS_INIT_TP): Fix typo.
+
2003-03-11 Jakub Jelinek <jakub@redhat.com>
* sysdeps/ia64/bits/atomic.h (atomic_exchange_and_add): Swap 2nd and
++cv->wakeup_seq;
++cv->woken_seq;
+ /* Make sure no signal gets lost. */
+ FUTEX_WAKE(cv->wakeup_seq, ALL);
+
lll_unlock(cv->lock);
}
#include <shlib-compat.h>
+#ifndef __NR_set_tid_address
/* XXX For the time being... Once we can rely on the kernel headers
having the definition remove these lines. */
#if defined __s390__
# define __NR_set_tid_address 252
#elif defined __ia64__
# define __NR_set_tid_address 1233
-#else
+#elif defined __i386__
# define __NR_set_tid_address 258
+#elif defined __x86_64__
+# define __NR_set_tid_address 218
+#else
+# error "define __NR_set_tid_address"
+#endif
#endif
pthread_mutex_t *mutex;
};
+
void
__attribute__ ((visibility ("hidden")))
__condvar_cleanup (void *arg)
++cbuffer->cond->__data.__wakeup_seq;
++cbuffer->cond->__data.__woken_seq;
+ /* Wake everybody to make sure no condvar signal gets lost. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ int *futex = ((int *) (&cbuffer->cond->__data.__wakeup_seq));
+#elif BYTE_ORDER == BIG_ENDIAN
+ int *futex = ((int *) (&cbuffer->cond->__data.__wakeup_seq)) + 1;
+#else
+# error "No valid byte order"
+#endif
+ lll_futex_wake (futex, INT_MAX);
+
/* We are done. */
lll_mutex_unlock (cbuffer->cond->__data.__lock);
#endif
call __lll_mutex_lock_wait
-1: addl $1, wakeup_seq(%ebx)
- adcl $0, wakeup_seq+4(%ebx)
+1: addl $wakeup_seq, %ebx
+ addl $1, (%ebx)
+ adcl $0, 4(%ebx)
- addl $1, woken_seq(%ebx)
- adcl $0, woken_seq+4(%ebx)
+ addl $1, woken_seq-wakeup_seq(%ebx)
+ adcl $0, woken_seq-wakeup_seq+4(%ebx)
+
+ /* Wake up all waiters to make sure no signal gets lost. */
+ movl $FUTEX_WAKE, %ecx
+ movl $SYS_futex, %eax
+ movl $0x7fffffff, %edx
+ ENTER_KERNEL
LOCK
-#if cond_lock == 0
- subl $1, (%ebx)
-#else
- subl $1, cond_lock(%ebx)
-#endif
+ subl $1, cond_lock-wakeup_seq(%ebx)
je 2f
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
-#endif
+
+ leal cond_lock-wakeup_seq(%ebx), %eax
call __lll_mutex_unlock_wake
/* Lock the mutex unless asynchronous cancellation is in effect. */
ifeq ($(subdir),nptl)
+CFLAGS-pt-initfini.s = -g0 -fPIC -fno-inline-functions -fno-asynchronous-unwind-tables
+
# We need to make sure that stack memory is allocated in the low 4GB.
CFLAGS-pthread_create.c += -DARCH_MAP_FLAGS=MAP_32BIT
endif
the application is using threads. */
#ifndef UP
# define LOCK \
- cmpl $0, __libc_multiple_threads_ptr(%rip); \
+ cmpl $0, __libc_multiple_threads(%rip); \
je 0f; \
lock; \
0:
the application is using threads. */
#ifndef UP
# define LOCK \
- cmpl $0, __libc_multiple_threads_ptr(%rip); \
+ cmpl $0, __libc_multiple_threads(%rip); \
je 0f; \
lock; \
0:
addq $1, woken_seq(%rdi)
+ /* Wake up all waiters to make sure no signal gets lost. */
+ addq $wakeup_seq, %rdi
+ movq $FUTEX_WAKE, %rsi
+ movl $0x7fffffff, %edx
+ movq $SYS_futex, %rax
+ syscall
+ subq $wakeup_seq, %rdi
+
LOCK
#if cond_lock == 0
decl (%rdi)
#ifndef _TLS_H
#define _TLS_H 1
+#include <asm/prctl.h> /* For ARCH_SET_FS. */
#ifndef __ASSEMBLER__
# include <stddef.h>
# include <stdint.h>
: "=a" (_result) \
: "0" ((unsigned long int) __NR_arch_prctl), \
"D" ((unsigned long int) ARCH_SET_FS), \
- "S" (_descr) \
+ "S" (_thrdescr) \
: "memory", "cc", "r11", "cx"); \
\
_result ? "cannot set %fs base address for thread-local storage" : 0; \