+2003-09-21 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Completely revamp the
+ locking macros. No distinction between normal and mutex locking
+ anymore.
+ * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Rewrite mutex
+ locking. Merge bits from lowlevelmutex.S we still need.
+ * sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S: Removed.
+ * Makefile (routines): Remove libc-lowlevelmutex.
+ (libpthread-routines): Remove lowlevelmutex.
+ * pthread_barrier_wait.S: Adjust for new mutex implementation.
+ * pthread_cond_broadcast.S: Likewise.
+ * pthread_cond_timedwait.S: Likewise.
+ * pthread_cond_wait.S: Likewise.
+ * pthread_rwlock_rdlock.S: Likewise.
+ * pthread_rwlock_timedrdlock.S: Likewise.
+ * pthread_rwlock_timedwrlock.S: Likewise.
+ * pthread_rwlock_unlock.S: Likewise.
+ * pthread_rwlock_wrlock.S: Likewise.
+ * pthread_cond_signal.S: Likewise. Don't use requeue.
+
2003-09-20 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Don't match memory
#define FUTEX_WAKE 1
- .globl __lll_lock_wait
- .type __lll_lock_wait,@function
- .hidden __lll_lock_wait
+ .globl __lll_mutex_lock_wait
+ .type __lll_mutex_lock_wait,@function
+ .hidden __lll_mutex_lock_wait
.align 16
-__lll_lock_wait:
+__lll_mutex_lock_wait:
pushl %esi
pushl %ebx
pushl %edx
movl %ecx, %ebx
xorl %esi, %esi /* No timeout. */
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl $2, %edx
+
1:
- leal -1(%eax), %edx /* account for the preceeded xadd. */
+ movl $1, %eax
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ je 2f
+
movl $SYS_futex, %eax
ENTER_KERNEL
- orl $-1, %eax /* Load -1. */
- LOCK
- xaddl %eax, (%ebx)
- jne,pn 1b
+ xorl %eax, %eax
+2: LOCK
+ cmpxchgl %edx, (%ebx)
- movl $-1, (%ebx)
+ testl %eax, %eax
+ jne,pn 1b
popl %edx
popl %ebx
popl %esi
ret
- .size __lll_lock_wait,.-__lll_lock_wait
+ .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_mutex_timedlock_wait
+ .type __lll_mutex_timedlock_wait,@function
+ .hidden __lll_mutex_timedlock_wait
+ .align 16
+__lll_mutex_timedlock_wait:
+ /* Check for a valid timeout value. */
+ cmpl $1000000000, 4(%edx)
+ jae 3f
+
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ pushl %ebp
+
+ /* Stack frame for the timespec and timeval structs. */
+ subl $8, %esp
+
+ movl %ecx, %ebp
+ movl %edx, %edi
+
+1:
+ /* Get current time. */
+ movl %esp, %ebx
+ xorl %ecx, %ecx
+ movl $SYS_gettimeofday, %eax
+ ENTER_KERNEL
+
+ /* Compute relative timeout. */
+ movl 4(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Micro seconds to nano seconds. */
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ subl (%esp), %ecx
+ subl %eax, %edx
+ jns 4f
+ addl $1000000000, %edx
+ subl $1, %ecx
+4: testl %ecx, %ecx
+ js 5f /* Time is already up. */
+
+ /* Store relative timeout. */
+ movl %ecx, (%esp)
+ movl %edx, 4(%esp)
+
+ movl %ebp, %ebx
+
+ movl $1, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ je 8f
+
+ /* Futex call. */
+ movl %esp, %esi
+ xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ movl %eax, %ecx
+
+8:
+ xorl %eax, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ jne 7f
+
+6: addl $8, %esp
+ popl %ebp
+ popl %ebx
+ popl %esi
+ popl %edi
+ ret
+
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ je 5f
+ jmp 1b
+
+3: movl $EINVAL, %eax
+ ret
+
+5: movl $ETIMEDOUT, %eax
+ jmp 6b
+ .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+#endif
#ifdef NOT_IN_libc
movl 20(%esp), %ebx
LOCK
- addl $1, (%ebx)
- jng 1f
+ subl $1, (%ebx)
+ je 1f
- popl %edx
+ movl $FUTEX_WAKE, %ecx
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ movl $0, (%ebx)
+ ENTER_KERNEL
+
+1: popl %edx
popl %ecx
popl %ebx
ret
#endif
- .globl __lll_unlock_wake
- .type __lll_unlock_wake,@function
- .hidden __lll_unlock_wake
+ .globl __lll_mutex_unlock_wake
+ .type __lll_mutex_unlock_wake,@function
+ .hidden __lll_mutex_unlock_wake
.align 16
-__lll_unlock_wake:
+__lll_mutex_unlock_wake:
pushl %ebx
pushl %ecx
pushl %edx
movl %eax, %ebx
-1: movl $FUTEX_WAKE, %ecx
+ movl $0, (%eax)
+ movl $FUTEX_WAKE, %ecx
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
- movl %edx, (%ebx) /* Stores '$1'. */
ENTER_KERNEL
popl %edx
popl %ecx
popl %ebx
ret
- .size __lll_unlock_wake,.-__lll_unlock_wake
+ .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
#ifdef NOT_IN_libc
+++ /dev/null
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-
- .text
-
-#ifndef LOCK
-# ifdef UP
-# define LOCK
-# else
-# define LOCK lock
-# endif
-#endif
-
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
- .align 16
-__lll_mutex_lock_wait:
- pushl %esi
- pushl %ebx
- pushl %edx
-
- /* In the loop we are going to add 2 instead of 1 which is what
- the caller did. Account for that. */
- subl $1, %eax
-
- movl %ecx, %ebx
- xorl %esi, %esi /* No timeout. */
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
-1:
- leal 2(%eax), %edx /* account for the preceeded xadd. */
- movl $SYS_futex, %eax
- ENTER_KERNEL
-
- movl $2, %eax
- LOCK
- xaddl %eax, (%ebx)
- testl %eax, %eax
- jne,pn 1b
-
- popl %edx
- popl %ebx
- popl %esi
- ret
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
-
-#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
- .align 16
-__lll_mutex_timedlock_wait:
- /* Check for a valid timeout value. */
- cmpl $1000000000, 4(%edx)
- jae 3f
-
- pushl %edi
- pushl %esi
- pushl %ebx
- pushl %ebp
-
- /* In the loop we are going to add 2 instead of 1 which is what
- the caller did. Account for that. */
- subl $1, %eax
-
- /* Stack frame for the timespec and timeval structs. */
- subl $8, %esp
-
- movl %ecx, %ebp
- movl %edx, %edi
-
-1: leal 2(%eax), %esi
-
- /* Get current time. */
- movl %esp, %ebx
- xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
- ENTER_KERNEL
-
- /* Compute relative timeout. */
- movl 4(%esp), %eax
- movl $1000, %edx
- mul %edx /* Milli seconds to nano seconds. */
- movl (%edi), %ecx
- movl 4(%edi), %edx
- subl (%esp), %ecx
- subl %eax, %edx
- jns 4f
- addl $1000000000, %edx
- subl $1, %ecx
-4: testl %ecx, %ecx
- js 5f /* Time is already up. */
-
- /* Futex call. */
- movl %ecx, (%esp) /* Store relative timeout. */
- movl %edx, 4(%esp)
- movl %esi, %edx
- movl %esp, %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
- movl %ebp, %ebx
- movl $SYS_futex, %eax
- ENTER_KERNEL
- movl %eax, %ecx
-
- movl $2, %eax
- LOCK
- xaddl %eax, (%ebx)
- testl %eax, %eax
- jne 7f
-
- xorl %eax, %eax
-
-6: addl $8, %esp
- popl %ebp
- popl %ebx
- popl %esi
- popl %edi
- ret
-
- /* Check whether the time expired. */
-7: cmpl $-ETIMEDOUT, %ecx
- je 5f
- jmp 1b
-
-3: movl $EINVAL, %eax
- ret
-
-5: movl $ETIMEDOUT, %eax
- jmp 6b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
-#endif
-
-
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
- .align 16
-__lll_mutex_unlock_wake:
- pushl %ebx
- pushl %ecx
- pushl %edx
-
- movl %eax, %ebx
- movl $0, (%eax)
- movl $FUTEX_WAKE, %ecx
- movl $1, %edx /* Wake one thread. */
- movl $SYS_futex, %eax
- ENTER_KERNEL
-
- popl %edx
- popl %ecx
- popl %ebx
- ret
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
movl 8(%esp), %ebx
/* Get the mutex. */
- orl $-1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
+ testl %eax, %eax
jne 1f
/* One less waiter. If this was the last one needed wake
/* Release the mutex. */
LOCK
- addl $1, MUTEX(%ebx)
- jng 6f
+ subl $1, MUTEX(%ebx)
+ jne 6f
/* Wait for the remaining threads. The call will return immediately
if the CURR_EVENT memory has meanwhile been changed. */
waking the waiting threads since otherwise a new thread might
arrive and gets waken up, too. */
LOCK
- addl $1, MUTEX(%ebx)
- jng 4f
+ subl $1, MUTEX(%ebx)
+ jne 4f
5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
ret
1: leal MUTEX(%ebx), %ecx
- call __lll_lock_wait
+ call __lll_mutex_lock_wait
jmp 2b
4: leal MUTEX(%ebx), %eax
- call __lll_unlock_wake
+ call __lll_mutex_unlock_wake
jmp 5b
6: leal MUTEX(%ebx), %eax
- call __lll_unlock_wake
+ call __lll_mutex_unlock_wake
jmp 7b
.size pthread_barrier_wait,.-pthread_barrier_wait
movl 16(%esp), %ebx
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 1f
__pthread_cond_signal:
pushl %ebx
- pushl %esi
pushl %edi
- movl 16(%esp), %edi
+ movl 12(%esp), %edi
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%edi)
+ cmpxchgl %edx, (%edi)
#else
- xaddl %eax, cond_lock(%edi)
+ cmpxchgl %edx, cond_lock(%edi)
#endif
testl %eax, %eax
jne 1f
adcl $0, 4(%ebx)
/* Wake up one thread by moving it to the internal lock futex. */
- movl $FUTEX_REQUEUE, %ecx
+ movl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
- xorl %edx, %edx
- movl $1, %esi
+ movl $1, %edx
ENTER_KERNEL
-#ifndef __ASSUME_FUTEX_REQUEUE
- cmpl $-EINVAL, %eax
- je 7f
-#endif
-
- /* If we moved a thread we in any case have to make the syscall. */
- testl %eax, %eax
- jne 5f
-
/* Unlock. Note that at this point %edi always points to
cond_lock. */
4: LOCK
6: xorl %eax, %eax
popl %edi
- popl %esi
popl %ebx
ret
5: movl %edi, %eax
call __lll_mutex_unlock_wake
jmp 6b
-
-#ifndef __ASSUME_FUTEX_REQUEUE
-7: /* The futex requeue functionality is not available. */
- movl $1, %edx
- movl $FUTEX_WAKE, %ecx
- movl $SYS_futex, %eax
- ENTER_KERNEL
- jmp 4b
-#endif
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
GLIBC_2_3_2)
movl 28(%esp), %ebp
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 1f
.LcleanupEND:
/* Lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 5f
movl %eax, %esi
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
je 1f
movl 16(%esp), %ebx
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 1f
.LcleanupEND:
/* Lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 5f
movl %eax, %esi
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
je 1f
adcl $0, woken_seq+4(%ebx)
LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
subl $1, cond_lock(%ebx)
+#endif
je 2f
#if cond_lock == 0
movl 12(%esp), %ebx
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 1f
subl $READERS_WAKEUP, %ebx
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 12f
movl 32(%esp), %edi
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 1f
leal READERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %edx
+ movl %eax, %ecx
17:
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 12f
13: subl $1, READERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %edx
+ cmpl $-ETIMEDOUT, %ecx
jne 2b
18: movl $ETIMEDOUT, %ecx
call __lll_mutex_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %edx
+16: movl $-ETIMEDOUT, %ecx
jmp 17b
19: movl $EINVAL, %ecx
movl 32(%esp), %edi
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 1f
leal WRITERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %edx
+ movl %eax, %ecx
17:
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 12f
13: subl $1, WRITERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %edx
+ cmpl $-ETIMEDOUT, %ecx
jne 2b
18: movl $ETIMEDOUT, %ecx
call __lll_mutex_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %edx
+16: movl $-ETIMEDOUT, %ecx
jmp 17b
19: movl $EINVAL, %ecx
movl 12(%esp), %edi
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%edi)
+ cmpxchgl %edx, (%edi)
#else
- xaddl %eax, MUTEX(%edi)
+ cmpxchgl %edx, MUTEX(%edi)
#endif
testl %eax, %eax
jne 1f
movl 12(%esp), %ebx
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 1f
subl $WRITERS_WAKEUP, %ebx
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 12f
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
#ifdef PIC
const struct timespec *abstime)
__attribute ((regparm (3))) attribute_hidden;
/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wait (int *__futex)
+extern int __lll_mutex_unlock_wake (int *__futex)
__attribute ((regparm (1))) attribute_hidden;
({ unsigned char ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (futex) \
- : "r" (1), "m" (futex), "0" (0) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER) \
: "memory"); \
ret; })
#define lll_mutex_lock(futex) \
(void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"testl %0, %0\n\t" \
"jne _L_mutex_lock_%=\n\t" \
".subsection 1\n\t" \
".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
".previous\n" \
"1:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (1), "m" (futex) \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex) \
: "memory"); })
always wakeup waiters. */
#define lll_mutex_cond_lock(futex) \
(void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"testl %0, %0\n\t" \
"jne _L_mutex_cond_lock_%=\n\t" \
".subsection 1\n\t" \
".size _L_mutex_cond_lock_%=,.-_L_mutex_cond_lock_%=\n" \
".previous\n" \
"1:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (2), "m" (futex) \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (2), "m" (futex) \
: "memory"); })
#define lll_mutex_timedlock(futex, timeout) \
({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %3\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
"testl %0, %0\n\t" \
"jne _L_mutex_timedlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_mutex_timedlock_%=,@function\n" \
"_L_mutex_timedlock_%=:\n\t" \
"leal %3, %%ecx\n\t" \
- "movl %6, %%edx\n\t" \
+ "movl %7, %%edx\n\t" \
"call __lll_mutex_timedlock_wait\n\t" \
"jmp 1f\n\t" \
".size _L_mutex_timedlock_%=,.-_L_mutex_timedlock_%=\n"\
".previous\n" \
"1:" \
- : "=a" (result), "=&c" (ignore1), "=&d" (ignore2), \
+ : "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
"=m" (futex) \
- : "0" (1), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout) \
: "memory"); \
result; })
typedef int lll_lock_t;
/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (1)
-#define LLL_LOCK_INITIALIZER_LOCKED (0)
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
extern int __lll_lock_wait (int val, int *__futex)
/* The states of a lock are:
- 1 - untaken
- 0 - taken by one user
- <0 - taken by more users */
+ 0 - untaken
+ 1 - taken by one user
+ 2 - taken by more users */
#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1; setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (0), "m" (futex), "0" (1) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
- "jne _L_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_lock_%=,@function\n" \
- "_L_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_lock_%=,.-_L_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (-1), "m" (futex) \
- : "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "addl $1,%0\n\t" \
- "jng _L_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_unlock_%=,@function\n" \
- "_L_unlock_%=:\n\t" \
- "leal %0, %%eax\n\t" \
- "call __lll_unlock_wake\n\t" \
- "jmp 1f\n\t" \
- ".size _L_unlock_%=,.-_L_unlock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex) \
- : "memory"); })
+# define lll_trylock(futex) lll_mutex_trylock (futex)
+# define lll_lock(futex) lll_mutex_lock (futex)
+# define lll_unlock(futex) lll_mutex_unlock (futex)
#else
/* Special versions of the macros for use in libc itself. They avoid
the lock prefix when the thread library is not used.
"lock\n" \
"0:\tcmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (futex) \
- : "r" (0), "m" (futex), "0" (1), \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER), \
"i" (offsetof (tcbhead_t, multiple_threads)) \
: "memory"); \
ret; })
# define lll_lock(futex) \
(void) ({ int ignore1, ignore2; \
- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
+ __asm __volatile ("cmpl $0, %%gs:%P6\n\t" \
"je,pt 0f\n\t" \
"lock\n" \
- "0:\txaddl %0, %2\n\t" \
- "jne _L_lock_%=\n\t" \
+ "0:\tcmpxchgl %1, %2\n\t" \
+ "testl %0, %0\n\t" \
+ "jne _L_mutex_lock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_lock_%=,@function\n" \
- "_L_lock_%=:\n\t" \
+ ".type _L_mutex_lock_%=,@function\n" \
+ "_L_mutex_lock_%=:\n\t" \
"leal %2, %%ecx\n\t" \
- "call __lll_lock_wait\n\t" \
- "jmp 2f\n\t" \
- ".size _L_lock_%=,.-_L_lock_%=\n" \
+ "call __lll_mutex_lock_wait\n\t" \
+ "jmp 1f\n\t" \
+ ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
".previous\n" \
- "2:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (-1), "m" (futex), \
+ "1:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex), \
"i" (offsetof (tcbhead_t, multiple_threads)) \
: "memory"); })
__asm __volatile ("cmpl $0, %%gs:%P3\n\t" \
"je,pt 0f\n\t" \
"lock\n" \
- "0:\taddl $1,%0\n\t" \
- "jng _L_unlock_%=\n\t" \
+ "0:\tsubl $1,%0\n\t" \
+ "jne _L_mutex_unlock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_unlock_%=,@function\n" \
- "_L_unlock_%=:\n\t" \
+ ".type _L_mutex_unlock_%=,@function\n" \
+ "_L_mutex_unlock_%=:\n\t" \
"leal %0, %%eax\n\t" \
- "call __lll_unlock_wake\n\t" \
- "jmp 2f\n\t" \
- ".size _L_unlock_%=,.-_L_unlock_%=\n" \
+ "call __lll_mutex_unlock_wake\n\t" \
+ "jmp 1f\n\t" \
+ ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
".previous\n" \
- "2:" \
+ "1:" \
: "=m" (futex), "=&a" (ignore) \
: "m" (futex), \
"i" (offsetof (tcbhead_t, multiple_threads)) \
#define lll_islocked(futex) \
- (futex != 0)
+ (futex != LLL_LOCK_INITIALIZER)
/* The kernel notifies a process with uses CLONE_CLEARTID via futex