* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevelmutex.S: Removed.
* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Adjust
for new mutex implementation.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
Don't use requeue.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Completely revamp the
locking macros. No distinction between normal and mutex locking
anymore.
+ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Rewrite mutex
locking. Merge bits from lowlevelmutex.S we still need.
+ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S: Removed.
+ * sysdeps/unix/sysv/linux/x86_64/lowlevelmutex.S: Removed.
* Makefile (routines): Remove libc-lowlevelmutex.
(libpthread-routines): Remove lowlevelmutex.
- * pthread_barrier_wait.S: Adjust for new mutex implementation.
- * pthread_cond_broadcast.S: Likewise.
- * pthread_cond_timedwait.S: Likewise.
- * pthread_cond_wait.S: Likewise.
- * pthread_rwlock_rdlock.S: Likewise.
- * pthread_rwlock_timedrdlock.S: Likewise.
- * pthread_rwlock_timedwrlock.S: Likewise.
- * pthread_rwlock_unlock.S: Likewise.
- * pthread_rwlock_wrlock.S: Likewise.
- * pthread_cond_signal.S: Likewise. Don't use requeue.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Adjust
+ for new mutex implementation.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+ Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+ Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
+ Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+ Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
+ Don't use requeue.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise.
2003-09-20 Ulrich Drepper <drepper@redhat.com>
extra-libs-others := $(extra-libs)
install-lib-ldscripts := libpthread.so
-routines = alloca_cutoff forward libc-lowlevellock libc-lowlevelmutex \
- libc-cancellation
+routines = alloca_cutoff forward libc-lowlevellock libc-cancellation
shared-only-routines = forward
libpthread-routines = init events version \
cleanup_defer_compat unwind \
pt-longjmp \
cancellation \
- lowlevellock lowlevelmutex \
+ lowlevellock \
pt-vfork \
ptw-write ptw-read ptw-close ptw-fcntl ptw-accept \
ptw-connect ptw-recv ptw-recvfrom ptw-recvmsg ptw-send \
2: LOCK
cmpxchgl %edx, (%ebx)
- testl %eax, %eax
- jne,pn 1b
+ jnz,pn 1b
popl %edx
popl %ebx
LOCK
cmpxchgl %edx, (%ebx)
- testl %eax, %eax
- jne 7f
+ jnz 7f
6: addl $8, %esp
popl %ebp
xorl %eax, %eax
LOCK
cmpxchgl %edx, MUTEX(%ebx)
- testl %eax, %eax
- jne 1f
+ jnz 1f
/* One less waiter. If this was the last one needed wake
everybody. */
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: addl $wakeup_seq, %ebx
movl total_seq+4-wakeup_seq(%ebx), %eax
#else
cmpxchgl %edx, cond_lock(%edi)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: leal wakeup_seq(%edi), %ebx
movl total_seq+4(%edi), %eax
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
/* Store the reference to the mutex. If there is already a
different value in there this is a bad user bug. */
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- jne 5f
+ jnz 5f
6: movl woken_seq(%ebx), %eax
movl woken_seq+4(%ebx), %ecx
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- je 1f
+ jz 1f
#if cond_lock == 0
movl %ebx, %ecx
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
/* Store the reference to the mutex. If there is already a
different value in there this is a bad user bug. */
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- jne 5f
+ jnz 5f
6: movl woken_seq(%ebx), %eax
movl woken_seq+4(%ebx), %ecx
#else
cmpxchgl %edx, cond_lock(%ebx)
#endif
- testl %eax, %eax
- je 1f
+ jz 1f
#if cond_lock == 0
movl %ebx, %ecx
#else
cmpxchgl %edx, MUTEX(%ebx)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: movl WRITER(%ebx), %eax
testl %eax, %eax
#else
cmpxchgl %edx, MUTEX(%ebx)
#endif
- testl %eax, %eax
- jne 12f
+ jnz 12f
13: subl $1, READERS_QUEUED(%ebx)
jmp 2b
#else
cmpxchgl %edx, MUTEX(%ebp)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: movl WRITER(%ebp), %eax
testl %eax, %eax
#else
cmpxchgl %edx, MUTEX(%ebp)
#endif
- testl %eax, %eax
- jne 12f
+ jnz 12f
13: subl $1, READERS_QUEUED(%ebp)
cmpl $-ETIMEDOUT, %ecx
#else
cmpxchgl %edx, MUTEX(%ebp)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: movl WRITER(%ebp), %eax
testl %eax, %eax
#else
cmpxchgl %edx, MUTEX(%ebp)
#endif
- testl %eax, %eax
- jne 12f
+ jnz 12f
13: subl $1, WRITERS_QUEUED(%ebp)
cmpl $-ETIMEDOUT, %ecx
#else
cmpxchgl %edx, MUTEX(%edi)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: cmpl $0, WRITER(%edi)
jne 5f
#else
cmpxchgl %edx, MUTEX(%ebx)
#endif
- testl %eax, %eax
- jne 1f
+ jnz 1f
2: movl WRITER(%ebx), %eax
testl %eax, %eax
#else
cmpxchgl %edx, MUTEX(%ebx)
#endif
- testl %eax, %eax
- jne 12f
+ jnz 12f
13: subl $1, WRITERS_QUEUED(%ebx)
jmp 2b
#define lll_mutex_lock(futex) \
(void) ({ int ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "testl %0, %0\n\t" \
- "jne _L_mutex_lock_%=\n\t" \
+ "jnz _L_mutex_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_mutex_lock_%=,@function\n" \
"_L_mutex_lock_%=:\n\t" \
#define lll_mutex_cond_lock(futex) \
(void) ({ int ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "testl %0, %0\n\t" \
- "jne _L_mutex_cond_lock_%=\n\t" \
+ "jnz _L_mutex_cond_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_mutex_cond_lock_%=,@function\n" \
"_L_mutex_cond_lock_%=:\n\t" \
#define lll_mutex_timedlock(futex, timeout) \
({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
- "testl %0, %0\n\t" \
- "jne _L_mutex_timedlock_%=\n\t" \
+ "jnz _L_mutex_timedlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_mutex_timedlock_%=,@function\n" \
"_L_mutex_timedlock_%=:\n\t" \
"je,pt 0f\n\t" \
"lock\n" \
"0:\tcmpxchgl %1, %2\n\t" \
- "testl %0, %0\n\t" \
- "jne _L_mutex_lock_%=\n\t" \
+ "jnz _L_mutex_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_mutex_lock_%=,@function\n" \
"_L_mutex_lock_%=:\n\t" \
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- /* Modified: %rax, %rsi. */
- .globl __lll_lock_wait
- .type __lll_lock_wait,@function
- .hidden __lll_lock_wait
+ .globl __lll_mutex_lock_wait
+ .type __lll_mutex_lock_wait,@function
+ .hidden __lll_mutex_lock_wait
.align 16
-__lll_lock_wait:
+__lll_mutex_lock_wait:
pushq %r10
pushq %rdx
xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ movq %r10, %rsi /* movq $FUTEX_WAIT, %rsi */
1:
- leaq -1(%rsi), %rdx /* account for the preceeded xadd. */
- movq %r10, %rsi /* movq $FUTEX_WAIT, %rsi */
+ movl $1, %eax
+ LOCK
+ cmpxchgl %edx, (%rdi)
+
+ testl %eax, %eax
+ je 2f
+
movq $SYS_futex, %rax
syscall
- orl $-1, %esi /* Load -1. */
- LOCK
- xaddl %esi, (%rdi)
- jne 1b
+ xorl %eax, %eax
+2: LOCK
+ cmpxchgl %edx, (%rdi)
- movl $-1, (%rdi)
+ jnz 1b
popq %rdx
popq %r10
retq
- .size __lll_lock_wait,.-__lll_lock_wait
+ .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_mutex_timedlock_wait
+ .type __lll_mutex_timedlock_wait,@function
+ .hidden __lll_mutex_timedlock_wait
+ .align 16
+__lll_mutex_timedlock_wait:
+ /* Check for a valid timeout value. */
+ cmpq $1000000000, 8(%rdx)
+ jae 3f
+
+ pushq %r12
+ pushq %r13
+ pushq %r14
+
+ /* Stack frame for the timespec and timeval structs. */
+ subq $16, %rsp
+
+ movq %rdi, %r12
+ movq %rdx, %r13
+
+1:
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorq %rsi, %rsi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ /* This is a regular function call, all caller-save registers
+ might be clobbered. */
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movq $1000, %rdi
+ mul %rdi /* Micro seconds to nano seconds. */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 4f
+ addq $1000000000, %rsi
+ decq %rdi
+4: testq %rdi, %rdi
+ js 5f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ movl $1, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%r12)
+
+ testl %eax, %eax
+ je 8f
+
+ movq %rsp, %r10
+ xorq %rsi, %rsi /* movq $FUTEX_WAIT, %rsi */
+ movq %r12, %rdi
+ movq $SYS_futex, %rax
+ syscall
+ movq %rax, %rcx
+
+ movl $1, %eax
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 7f
+
+ movl $2, (%rdi)
+ xorl %eax, %eax
+
+8: addq $16, %rsp
+ popq %r14
+ popq %r13
+ popq %r12
+ retq
+
+ /* Check whether the time expired. */
+7: cmpq $-ETIMEDOUT, %rcx
+ je 5f
+ jmp 1b
+
+3: movl $EINVAL, %eax
+ retq
+
+5: movl $ETIMEDOUT, %eax
+ jmp 8b
+ .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+#endif
#ifdef NOT_IN_libc
#endif
- .globl __lll_unlock_wake
- .type __lll_unlock_wake,@function
- .hidden __lll_unlock_wake
+ .globl __lll_mutex_unlock_wake
+ .type __lll_mutex_unlock_wake,@function
+ .hidden __lll_mutex_unlock_wake
.align 16
-__lll_unlock_wake:
+__lll_mutex_unlock_wake:
pushq %rsi
pushq %rdx
-1: movq $FUTEX_WAKE, %rsi
+ movl $0, (%rdi)
+ movq $FUTEX_WAKE, %rsi
movl $1, %edx /* Wake one thread. */
movq $SYS_futex, %rax
- movl %edx, (%rdi) /* Stores '$1'. */
syscall
popq %rdx
popq %rsi
retq
- .size __lll_unlock_wake,.-__lll_unlock_wake
+ .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
#ifdef NOT_IN_libc
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
#define lll_futex_wait(futex, val) \
({ unsigned char ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (futex) \
- : "r" (1), "1" (futex), "0" (0) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER) \
: "memory"); \
ret; })
#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
- "testl %0, %0\n\t" \
- "jne 1f\n\t" \
+ (void) ({ int ignore1, ignore2, ignore3; \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
".subsection 1\n" \
"1:\tleaq %2, %%rdi\n\t" \
"subq $128, %%rsp\n\t" \
"jmp 2f\n\t" \
".previous\n" \
"2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex) \
- : "0" (1), "2" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); })
#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
- "testl %0, %0\n\t" \
- "jne 1f\n\t" \
+ (void) ({ int ignore1, ignore2, ignore3; \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
".subsection 1\n" \
"1:\tleaq %2, %%rdi\n\t" \
"subq $128, %%rsp\n\t" \
"jmp 2f\n\t" \
".previous\n" \
"2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex) \
- : "0" (2), "2" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
+ "=a" (ignore3) \
+ : "0" (2), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); })
#define lll_mutex_timedlock(futex, timeout) \
({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %4\n\t" \
- "testl %0, %0\n\t" \
- "jne 1f\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ "jnz 1f\n\t" \
".subsection 1\n" \
"1:\tleaq %4, %%rdi\n\t" \
- "movq %7, %%rdx\n\t" \
+ "movq %8, %%rdx\n\t" \
"subq $128, %%rsp\n\t" \
"callq __lll_mutex_timedlock_wait\n\t" \
"addq $128, %%rsp\n\t" \
"jmp 2f\n\t" \
".previous\n" \
"2:" \
- : "=a" (result), "=&D" (ignore1), "=&S" (ignore2), \
+ : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
- : "0" (1), "4" (futex), "m" (timeout) \
+ : "0" (0), "2" (1), "m" (futex), "m" (timeout) \
: "memory", "cx", "cc", "r10", "r11"); \
result; })
".previous\n" \
"2:" \
: "=m" (futex), "=&D" (ignore) \
- : "0" (futex) \
+ : "m" (futex) \
: "ax", "cx", "r11", "cc", "memory"); })
#define lll_mutex_islocked(futex) \
- (futex != 0)
+ (futex != LLL_MUTEX_LOCK_INITIALIZER)
/* We have a separate internal lock implementation which is not tied
typedef int lll_lock_t;
/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (1)
-#define LLL_LOCK_INITIALIZER_LOCKED (0)
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
-extern int __lll_lock_wait (int *__futex, int val) attribute_hidden;
-extern int __lll_unlock_wake (int *__futex) attribute_hidden;
extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
/* The states of a lock are:
- 1 - untaken
- 0 - taken by one user
- <0 - taken by more users */
+ 0 - untaken
+ 1 - taken by one user
+ 2 - taken by more users */
#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1; setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (0), "1" (futex), "0" (1) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %2, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_lock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex) \
- : "0" (-1), "2" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "incl %0\n\t" \
- "jng 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %0, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_unlock_wake\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=m" (futex), "=&D" (ignore) \
- : "0" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
+# define lll_trylock(futex) lll_mutex_trylock (futex)
+# define lll_lock(futex) lll_mutex_lock (futex)
+# define lll_unlock(futex) lll_mutex_unlock (futex)
#else
/* Special versions of the macros for use in libc itself. They avoid
the lock prefix when the thread library is not used.
"lock\n" \
"0:\tcmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (futex) \
- : "r" (0), "1" (futex), "0" (1) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER) \
: "memory"); \
ret; })
# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
+ (void) ({ int ignore1, ignore2, ignore3; \
__asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
"je 0f\n\t" \
"lock\n" \
- "0:\txaddl %0, %2\n\t" \
- "jne 1f\n\t" \
+ "0:\tcmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
".subsection 1\n" \
"1:\tleaq %2, %%rdi\n\t" \
"subq $128, %%rsp\n\t" \
- "callq __lll_lock_wait\n\t" \
+ "callq __lll_mutex_lock_wait\n\t" \
"addq $128, %%rsp\n\t" \
"jmp 2f\n\t" \
".previous\n" \
"2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex) \
- : "0" (-1), "2" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); })
# define lll_unlock(futex) \
__asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
"je 0f\n\t" \
"lock\n" \
- "0:\tincl %0\n\t" \
- "jng 1f\n\t" \
+ "0:\tdecl %0\n\t" \
+ "jne 1f\n\t" \
".subsection 1\n" \
"1:\tleaq %0, %%rdi\n\t" \
"subq $128, %%rsp\n\t" \
- "callq __lll_unlock_wake\n\t" \
+ "callq __lll_mutex_unlock_wake\n\t" \
"addq $128, %%rsp\n\t" \
"jmp 2f\n\t" \
".previous\n" \
#define lll_islocked(futex) \
- (futex != 0)
+ (futex != LLL_MUTEX_LOCK_INITIALIZER)
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
+++ /dev/null
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-
- .text
-
-#ifndef LOCK
-# ifdef UP
-# define LOCK
-# else
-# define LOCK lock
-# endif
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-/* For the calculation see asm/vsyscall.h. */
-#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
- .align 16
-__lll_mutex_lock_wait:
- pushq %r10
- pushq %rdx
-
- /* In the loop we are going to add 2 instead of 1 which is what
- the caller did. Account for that. */
- decq %rsi
-
- xorq %r10, %r10 /* No timeout. */
-
-1:
- leaq 2(%rsi), %rdx /* account for the preceeded xadd. */
- movq %r10, %rsi /* movq $FUTEX_WAIT, %rsi */
- movq $SYS_futex, %rax
- syscall
-
- movl $2, %esi
- LOCK
- xaddl %esi, (%rdi)
- testl %esi, %esi
- jne 1b
-
- popq %rdx
- popq %r10
- retq
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
-
-#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
- .align 16
-__lll_mutex_timedlock_wait:
- /* Check for a valid timeout value. */
- cmpq $1000000000, 8(%rdx)
- jae 3f
-
- pushq %r12
- pushq %r13
- pushq %r14
-
- /* Stack frame for the timespec and timeval structs. */
- subq $16, %rsp
-
- movq %rdi, %r12
- movq %rdx, %r13
-
-1: leaq 1(%rax), %r14
-
- /* Get current time. */
- movq %rsp, %rdi
- xorq %rsi, %rsi
- movq $VSYSCALL_ADDR_vgettimeofday, %rax
- /* This is a regular function call, all calleer-save registers
- might be clobbered. */
- callq *%rax
-
- /* Compute relative timeout. */
- movq 8(%rsp), %rax
- movq $1000, %rdi
- mul %rdi /* Milli seconds to nano seconds. */
- movq (%r13), %rdi
- movq 8(%r13), %rsi
- subq (%rsp), %rdi
- subq %rax, %rsi
- jns 4f
- addq $1000000000, %rsi
- decq %rdi
-4: testq %rdi, %rdi
- js 5f /* Time is already up. */
-
- /* Futex call. */
- movq %rdi, (%rsp) /* Store relative timeout. */
- movq %rsi, 8(%rsp)
-
- movl %r14d, %edx
- movq %rsp, %r10
- xorq %rsi, %rsi /* movq $FUTEX_WAIT, %rsi */
- movq %r12, %rdi
- movq $SYS_futex, %rax
- syscall
- movq %rax, %rcx
-
- movl $1, %eax
- LOCK
- xaddl %eax, (%rdi)
- testl %eax, %eax
- jne 7f
-
- movl $2, (%rdi)
- xorl %eax, %eax
-
-8: addq $16, %rsp
- popq %r14
- popq %r13
- popq %r12
- retq
-
- /* Check whether the time expired. */
-7: cmpq $-ETIMEDOUT, %rcx
- je 5f
- jmp 1b
-
-3: movl $EINVAL, %eax
- retq
-
-5: movl $ETIMEDOUT, %eax
- jmp 8b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
-#endif
-
-
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
- .align 16
-__lll_mutex_unlock_wake:
- pushq %rsi
- pushq %rdx
-
- movl $0, (%rdi)
- movq $FUTEX_WAKE, %rsi
- movl $1, %edx /* Wake one thread. */
- movq $SYS_futex, %rax
- syscall
-
- popq %rdx
- popq %rsi
- retq
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
.align 16
pthread_barrier_wait:
/* Get the mutex. */
- orl $-1, %esi
+ xorl %eax, %eax
+ movl $1, %esi
LOCK
- xaddl %esi, MUTEX(%rdi)
- jne 1f
+ cmpxchgl %esi, MUTEX(%rdi)
+ jnz 1f
/* One less waiter. If this was the last one needed wake
everybody. */
/* Release the mutex. */
LOCK
- addl $1, MUTEX(%rdi)
- jng 6f
+ decl MUTEX(%rdi)
+ jne 6f
/* Wait for the remaining threads. The call will return immediately
if the CURR_EVENT memory has meanwhile been changed. */
waking the waiting threads since otherwise a new thread might
arrive and gets waken up, too. */
LOCK
- addl $1, MUTEX(%rdi)
- jng 4f
+ decl MUTEX(%rdi)
+ jne 4f
5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
retq
1: addq $MUTEX, %rdi
- callq __lll_lock_wait
+ callq __lll_mutex_lock_wait
subq $MUTEX, %rdi
jmp 2b
4: addq $MUTEX, %rdi
- callq __lll_unlock_wake
+ callq __lll_mutex_unlock_wake
subq $MUTEX, %rdi
jmp 5b
6: addq $MUTEX, %rdi
- callq __lll_unlock_wake
+ callq __lll_mutex_unlock_wake
subq $MUTEX, %rdi
jmp 7b
.size pthread_barrier_wait,.-pthread_barrier_wait
/* Get internal lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: addq $wakeup_seq, %rdi
movq total_seq-wakeup_seq(%rdi), %rcx
/* Get internal lock. */
movq %rdi, %r8
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: addq $wakeup_seq, %rdi
movq total_seq(%r8), %rcx
addq $1, (%rdi)
/* Wake up one thread. */
- movq $FUTEX_REQUEUE, %rsi
+ movq $FUTEX_WAKE, %rsi
movq $SYS_futex, %rax
- xorq %rdx, %rdx
- movq $1, %r10
+ movq $1, %rdx
syscall
-#ifndef __ASSUME_FUTEX_REQUEUE
- cmpq $-EINVAL, %rax
- je 7f
-#endif
-
- /* If we moved a thread we in any case have to make the syscall. */
- testq %rax, %rax
- jne 5f
-
/* Unlock. */
4: LOCK
#if cond_lock == 0
#endif
callq __lll_mutex_unlock_wake
jmp 6b
-
-#ifndef __ASSUME_FUTEX_REQUEUE
-7: /* The futex requeue functionality is not available. */
- movq $1, %rdx
- movq $FUTEX_WAKE, %rsi
- movq $SYS_futex, %rax
- syscall
- jmp 4b
-#endif
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
GLIBC_2_3_2)
/* Get internal lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
/* Unlock the mutex. */
2: movq 16(%rsp), %rdi
/* Lock. */
movq 8(%rsp), %rdi
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
jne 5f
6: movq woken_seq(%rdi), %rax
movq %rdi, %r8
movq 8(%rdi), %rdi
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
- je 1f
+ jz 1f
#if cond_lock != 0
addq $cond_lock, %rdi
/* Get internal lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
jne 1f
/* Unlock the mutex. */
/* Lock. */
movq 8(%rsp), %rdi
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, cond_lock(%rdi)
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- testl %esi, %esi
- jne 5f
+ jnz 5f
6: movq woken_seq(%rdi), %rax
/* Get the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: movl WRITER(%rdi), %eax
testl %eax, %eax
/* Reget the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 12f
+ jnz 12f
13: decl READERS_QUEUED(%rdi)
jmp 2b
/* Get the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: movl WRITER(%r12), %eax
testl %eax, %eax
/* Reget the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%r12)
+ cmpxchgl %esi, (%r12)
#else
- xaddl %esi, MUTEX(%r12)
+ cmpxchgl %esi, MUTEX(%r12)
#endif
- testl %esi, %esi
- jne 12f
+ jnz 12f
13: decl READERS_QUEUED(%r12)
cmpq $-ETIMEDOUT, %rdx
/* Get the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: movl WRITER(%r12), %eax
testl %eax, %eax
/* Reget the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%r12)
+ cmpxchgl %esi, (%r12)
#else
- xaddl %esi, MUTEX(%r12)
+ cmpxchgl %esi, MUTEX(%r12)
#endif
- testl %esi, %esi
- jne 12f
+ jnz 12f
13: decl WRITERS_QUEUED(%r12)
cmpq $-ETIMEDOUT, %rdx
__pthread_rwlock_unlock:
/* Get the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: cmpq $0, WRITER(%rdi)
jne 5f
/* Get the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 1f
+ jnz 1f
2: movl WRITER(%rdi), %eax
testl %eax, %eax
/* Reget the lock. */
movl $1, %esi
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %esi, (%rdi)
+ cmpxchgl %esi, (%rdi)
#else
- xaddl %esi, MUTEX(%rdi)
+ cmpxchgl %esi, MUTEX(%rdi)
#endif
- testl %esi, %esi
- jne 12f
+ jnz 12f
13: decl WRITERS_QUEUED(%rdi)
jmp 2b