* sysdeps/powerpc/bits/atomic.h [! __powerpc64__]
(__arch_atomic_decrement_if_positive_64): Fix bogus definition.
+2003-03-28 Kaz Kojima <kkojima@rr.iij4u.or.jp>
+
+ * sysdeps/sh/bits/atomic.h (__arch_compare_and_exchange_val_8_acq):
+ Return old value. Make asm output reg constraint earlyclobber.
+ Renamed from...
+ (__arch_compare_and_exchange_8_acq): ... this.
+ (__arch_compare_and_exchange_val_16_acq):
+ Return old value. Make asm output reg constraint earlyclobber.
+ Renamed from...
+ (__arch_compare_and_exchange_16_acq): ... this.
+ (__arch_compare_and_exchange_val_32_acq):
+ Return old value. Make asm output reg constraint earlyclobber.
+ Renamed from...
+ (__arch_compare_and_exchange_32_acq): ... this.
+ (__arch_compare_and_exchange_val_64_acq):
+ Renamed from...
+ (__arch_compare_and_exchange_64_acq): ... this.
+ (atomic_exchange_and_add): Use local variables and
+ __arch_compare_and_exchange_val_64_acq.
+ (atomic_add): Likewise.
+ (atomic_add_negative, atomic_add_zero): Use local variables.
+
2003-03-28 Alexandre Oliva <aoliva@redhat.com>
* sysdeps/unix/mips/sysdep.S: Include sys/asm.h.
- * sysdeps/unix/sysv/linux/mips/configure: Rebuilt.
-
2003-03-27 Ulrich Drepper <drepper@redhat.com>
- * Makefile: Remove libmd5script goal.
+ * Makefile: Remove libmd5crypt goal.
2003-03-25 Jakub Jelinek <jakub@redhat.com>
+2003-03-28 Kaz Kojima <kkojima@rr.iij4u.or.jp>
+
+ * sysdeps/sh/tls.h: Include nptl/descr.h after the definition
+ of TLS_DTV_AT_TP.
+ (INSTALL_DTV): Add parens.
+ (THREAD_GETMEM, THREAD_GETMEM_NC, THREAD_SETMEM, THREAD_SETMEM_NC):
+ Use passed descr instead of THREAD_SELF.
+ * sysdeps/unix/sysv/linux/sh/lowlevelmutex.S
+ (__lll_mutex_timedlock_wait): Correct expected value after
+ spurious wakeup.
+ * sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S:
+ Release lock before waking up the waiters.
+ * sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Correct exit
+ criteria. Reorder struct passed to cleanup handler. Fix
+ handling of cancellation and failing pthread_mutex_unlock call.
+ Use __pthread_enable_asynccancel_2 instead of
+ __pthread_enable_asynccancel.
+ * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Likewise.
+ Return result of lock re-get if it fails.
+ * sysdeps/unix/sysv/linux/sh/pthread_once.S: Fix wrong argument
+ for __pthread_cleanup_push.
+ * sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S: Fix
+ completely broken rwlock implementation.
+ * sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S: Likewise.
+ * sysdeps/unix/sysv/linux/sh/sem_post.S: Fix error value. Use
+ versioned_symbol macro.
+ * sysdeps/unix/sysv/linux/sh/sem_trywait.S: Use versioned_symbol macro.
+ * sysdeps/unix/sysv/linux/sh/sem_wait.S: Likewise.
+
2003-03-27 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/kernel-posix-timers.h: Don't declare
trapa #0x14
SYSCALL_INST_PAD
- mov r0, r1
-
mov.l @r8, r0
tst r0, r0
bf 1f
mov r5, r8
mov r6, r9
mov r4, r10
- add #1, r10
/* Stack frame for the timespec and timeval structs. */
add #-8, r15
1:
+ add #1, r10
+
/* Get current time. */
mov r15, r4
mov #0, r5
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
+ mov r0, r4
- mov #1, r3
- XADD (r3, @r8, r10)
- tst r10, r10
+ mov #1, r10
+ XADD (r10, @r8, r3)
+ tst r3, r3
bf 7f
mov #2, r1
7:
/* Check whether the time expired. */
mov #-ETIMEDOUT, r1
- cmp/eq r0, r1
+ cmp/eq r4, r1
bt 5f
bra 1b
nop
bf 4f
3:
- /* Case all currently waiting threads to wake up. */
+ /* Cause all currently waiting threads to recognize they are
+ woken up. */
mov.l r1, @(wakeup_seq,r8)
mov.l r0, @(wakeup_seq+4,r8)
+ /* Unlock. */
+#if cond_lock != 0
+ DEC (@(cond_lock,r8), r2)
+#else
+ DEC (@r8, r2)
+#endif
+ tst r2, r2
+ bf 7f
+8:
/* Wake up all threads. */
mov r8, r4
add #wakeup_seq, r4
trapa #0x14
SYSCALL_INST_PAD
+ mov #0, r0
+ lds.l @r15+, pr
+ rts
+ mov.l @r15+, r8
+
4:
/* Unlock. */
#if cond_lock != 0
nop
5:
- /* Unlock in loop requires waekup. */
+ /* Unlock in loop requires wakeup. */
mov r8, r4
#if cond_lock != 0
add #cond_lock, r4
bra 6b
nop
+7:
+ /* Unlock in loop requires wakeup. */
+ mov r8, r4
+#if cond_lock != 0
+ add #cond_lock, r4
+#endif
+ mov.l .Lmwake6, r1
+ bsrf r1
+ nop
+.Lmwake6b:
+ bra 8b
+ nop
+
.align 2
.Lmwait5:
.long __lll_mutex_lock_wait-.Lmwait5b
.Lmwake5:
.long __lll_mutex_unlock_wake-.Lmwake5b
+.Lmwake6:
+ .long __lll_mutex_unlock_wake-.Lmwake6b
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
GLIBC_2_3_2)
mov.l r9, @-r15
mov.l r8, @-r15
sts.l pr, @-r15
- add #-48, r15
+ add #-64, r15
mov r4, r8
mov r5, r9
mov r6, r10
mov r9, r4
.Lmunlock1b:
+ tst r0, r0
+ bf 16f
+
mov #1, r2
mov #0, r3
mov.l .Lccleanup1, r5
#endif
mov r15, r4
- add #28, r4
+ add #36, r4
mov.l .Lccpush1, r1
bsrf r1
- mov r8, r6
+ mov r15, r6
.Lccpush1b:
/* Get and store current wakeup_seq value. */
mov.l @(wakeup_seq,r8), r0
mov.l @(wakeup_seq+4,r8), r1
- mov.l r0, @(12,r15)
- mov.l r1, @(16,r15)
+ mov.l r0, @(20,r15)
+ mov.l r1, @(24,r15)
+ /* Prepare structure passed to cancellation handler. */
+ mov.l r9, @r15
+ mov.l r8, @(4,r15)
/* Unlock. */
8:
bra 3f
nop
4:
+ mov r15, r4
mov.l .Lenable1, r1
bsrf r1
- nop
+ add #8, r4
+
.Lenable1b:
- mov.l r0, @r15
/* Get current time. */
mov r15, r4
- add #4, r4
+ add #12, r4
mov #0, r5
mov #SYS_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
/* Compute relative timeout. */
- mov.l @(8,r15), r0
+ mov.l @(16,r15), r0
mov.w .L1k, r1
dmulu.l r0, r1 /* Milli seconds to nano seconds. */
mov.l @r10, r2
mov.l @(4,r10), r3
- mov.l @(4,r15), r0
+ mov.l @(12,r15), r0
sts macl, r1
sub r0, r2
clrt
bf 13f /* Time is already up. */
/* Store relative timeout. */
- mov.l r2, @(4,r15)
- mov.l r3, @(8,r15)
+ mov.l r2, @(12,r15)
+ mov.l r3, @(16,r15)
mov r15, r7
- add #4, r7
+ add #12, r7
mov #FUTEX_WAIT, r5
- mov.l @(12,r15), r6
+ mov.l @(20,r15), r6
mov r8, r4
add #wakeup_seq, r4
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
- mov.l r0, @(20,r15)
+ mov.l r0, @(28,r15)
mov.l .Ldisable1, r1
bsrf r1
- mov.l @r15, r4
+ mov.l @(8,r15), r4
.Ldisable1b:
/* Lock. */
mov.l @(wakeup_seq,r8), r2
mov.l @(wakeup_seq+4,r8), r3
- mov.l @(16,r15), r5
- cmp/hi r5, r1
+ mov.l @(24,r15), r5
+ cmp/hi r5, r3
bt 7f
- cmp/hi r1, r5
+ cmp/hi r3, r5
bt 15f
- mov.l @(12,r15), r5
- cmp/hi r0, r5
+ mov.l @(20,r15), r5
+ cmp/hs r2, r5
bt 15f
7:
cmp/hi r1, r3
cmp/hi r0, r2
bt 9f
15:
- mov.l @(20,r15),r0
+ mov.l @(28,r15),r0
cmp/eq #-ETIMEDOUT, r0
bf 8b
13:
mov.l r1,@(wakeup_seq+4,r8)
mov #ETIMEDOUT, r0
bra 14f
- mov.l r0, @(24,r15)
+ mov.l r0, @(32,r15)
9:
mov #0, r0
- mov.l r0, @(24,r15)
+ mov.l r0, @(32,r15)
14:
mov #1, r2
mov #0, r3
11:
/* Remove cancellation handler. */
mov r15, r4
- add #28, r4
+ add #36, r4
mov.l .Lcpop1, r1
bsrf r1
mov #0, r5
mov #0, r5
.Lmlocki1b:
- mov.l @(24,r15), r0
+ /* We return the result of the mutex_lock operation if it failed. */
+ tst r0, r0
+ bf 18f
+ mov.l @(32,r15), r0
- add #48, r15
-
- /* We return the result of the mutex_lock operation. */
+18:
+ add #64, r15
lds.l @r15+, pr
mov.l @r15+, r8
mov.l @r15+, r9
.Lccpush1:
.long __pthread_cleanup_push-.Lccpush1b
.Lenable1:
- .long __pthread_enable_asynccancel-.Lenable1b
+ .long __pthread_enable_asynccancel_2-.Lenable1b
.Ldisable1:
.long __pthread_disable_asynccancel-.Ldisable1b
.Lcpop1:
bra 11b
nop
+16:
+ /* The initial unlocking of the mutex failed. */
+ mov.l r0, @(32,r15)
+#if cond_lock != 0
+ DEC (@(cond_lock,r8), r2)
+#else
+ DEC (@r8, r2)
+#endif
+ tst r2, r2
+ bf 17f
+
+ mov r8, r4
+#if cond_lock != 0
+ add #cond_lock, r4
+#endif
+ mov.l .Lmwake4, r1
+ bsrf r1
+ nop
+.Lmwake4b:
+17:
+ bra 18b
+ mov.l @(32,r15), r0
+
.align 2
.Lmwait2:
.long __lll_mutex_lock_wait-.Lmwait2b
.long __lll_mutex_lock_wait-.Lmwait3b
.Lmwake3:
.long __lll_mutex_unlock_wake-.Lmwake3b
+.Lmwake4:
+ .long __lll_mutex_unlock_wake-.Lmwake4b
.size __pthread_cond_timedwait, .-__pthread_cond_timedwait
versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
GLIBC_2_3_2)
.hidden __condvar_cleanup
__condvar_cleanup:
mov.l r8, @-r15
+ mov.l r9, @-r15
sts.l pr, @-r15
- mov r4, r8
+ mov r4, r9
+ mov.l @(4,r9), r8
/* Get internal lock. */
mov #1, r3
nop
.Lwake0b:
2:
+
+ /* Wake up all waiters to make sure no signal gets lost. */
+ mov r8, r4
+ add #wakeup_seq, r4
+ mov #FUTEX_WAKE, r5
+ mov #-1, r6
+ shlr r6 /* r6 = 0x7fffffff */
+ mov #0, r7
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+
+ /* Lock the mutex unless asynchronous cancellation is in effect. */
+ mov.l @(8,r9), r0
+ and #2, r0
+ tst r0, r0
+ bf 3f
+
+ mov.l .Lmlocki1, r1
+ bsrf r1
+ mov.l @r9, r4
+.Lmlocki1b:
+
+3:
lds.l @r15+, pr
+ mov.l @r15+, r9
rts
mov.l @r15+, r8
.long __lll_mutex_lock_wait-.Lwait0b
.Lwake0:
.long __lll_mutex_unlock_wake-.Lwake0b
+.Lmlocki1:
+ .long __pthread_mutex_lock_internal-.Lmlocki1b
.size __condvar_cleanup, .-__condvar_cleanup
mov.l r9, @-r15
mov.l r8, @-r15
sts.l pr, @-r15
- add #-32, r15
+ add #-48, r15
mov r4, r8
mov r5, r9
mov r9, r4
.Lmunlock0b:
+ tst r0, r0
+ bf 12f
+
mov #1, r2
mov #0, r3
mov.l .Lccleanup0, r5
#endif
mov r15, r4
- add #12, r4
+ add #20, r4
mov.l .Lccpush0, r1
bsrf r1
- mov r8, r6
+ mov r15, r6
.Lccpush0b:
/* Get and store current wakeup_seq value. */
mov.l @(wakeup_seq,r8), r0
mov.l @(wakeup_seq+4,r8), r1
- mov.l r0, @(4,r15)
- mov.l r1, @(8,r15)
+ mov.l r0, @(12,r15)
+ mov.l r1, @(16,r15)
+ /* Prepare structure passed to cancellation handler. */
+ mov.l r9, @r15
+ mov.l r8, @(4,r15)
8:
/* Unlock. */
tst r2, r2
bf 3f
4:
+ mov r15, r4
mov.l .Lenable0, r1
bsrf r1
- nop
+ add #8, r4
.Lenable0b:
- mov.l r0, @r15
mov #0, r7
mov #FUTEX_WAIT, r5
- mov.l @(4,r15), r6
+ mov.l @(12,r15), r6
mov r8, r4
add #wakeup_seq, r4
mov #SYS_futex, r3
mov.l .Ldisable0, r1
bsrf r1
- mov.l @r15, r4
+ mov.l @(8,r15), r4
.Ldisable0b:
/* Lock. */
mov.l @(wakeup_seq,r8), r2
mov.l @(wakeup_seq+4,r8), r3
- mov.l @(8,r15), r5
- cmp/hi r5, r1
+ mov.l @(16,r15), r5
+ cmp/hi r5, r3
bt 7f
- cmp/hi r1, r5
+ cmp/hi r3, r5
bt 8b
- mov.l @(4,r15), r5
- cmp/hi r0, r5
+ mov.l @(12,r15), r5
+ cmp/hs r2, r5
bt 8b
7:
cmp/hi r1, r3
11:
/* Remove cancellation handler. */
mov r15, r4
- add #12, r4
+ add #20, r4
mov.l .Lcpop0, r1
bsrf r1
mov #0, r5
bsrf r1
mov #0, r5
.Lmlocki0b:
-
- add #32, r15
-
/* We return the result of the mutex_lock operation. */
+
+14:
+ add #48, r15
lds.l @r15+, pr
mov.l @r15+, r8
mov.l @r15+, r9
.Lccpush0:
.long __pthread_cleanup_push-.Lccpush0b
.Lenable0:
- .long __pthread_enable_asynccancel-.Lenable0b
+ .long __pthread_enable_asynccancel_2-.Lenable0b
.Ldisable0:
.long __pthread_disable_asynccancel-.Ldisable0b
.Lcpop0:
bra 11b
nop
+12:
+ /* The initial unlocking of the mutex failed. */
+ mov.l r0, @-r15
+#if cond_lock != 0
+ DEC (@(cond_lock,r8), r2)
+#else
+ DEC (@r8, r2)
+#endif
+ tst r2, r2
+ bf 13f
+
+ mov r8, r4
+#if cond_lock != 0
+ add #cond_lock, r4
+#endif
+ mov.l .Lmwake2, r1
+ bsrf r1
+ nop
+.Lmwake2b:
+
+13:
+ bra 14b
+ mov.l @r15+, r0
+
.align 2
.Lmwait0:
.long __lll_mutex_lock_wait-.Lmwait0b
.long __lll_mutex_lock_wait-.Lmwait1b
.Lmwake1:
.long __lll_mutex_unlock_wake-.Lmwake1b
+.Lmwake2:
+ .long __lll_mutex_unlock_wake-.Lmwake2b
.size __pthread_cond_wait, .-__pthread_cond_wait
versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
GLIBC_2_3_2)
#endif
mov.l .Lcpush, r1
bsrf r1
- mov r3, r6
+ mov r9, r6
.Lcpush0:
jsr @r8
nop
mov r8, r4
add #READERS_WAKEUP, r4
mov #FUTEX_WAIT, r5
- mov #0, r6
+ mov.l @(READERS_WAKEUP,r8), r6
+ mov #0, r7
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
13:
mov.l @(READERS_QUEUED,r8), r0
add #-1, r0
- mov.l r0, @(READERS_QUEUED,r8)
- tst r0, r0
- bf 2b
bra 2b
- mov.l r0, @(READERS_WAKEUP,r8)
+ mov.l r0, @(READERS_QUEUED,r8)
5:
mov #0, r3
stc gbr, r1
mov.w .Ltcboff,r2
sub r2,r1
- mov.l @(8,r1),r1
cmp/eq r1, r0
bf 3b
/* Deadlock detected. */
tst r0, r0
bt 5f
3:
+ /* Check the value of the timeout parameter. */
+ mov.l .L1g0, r1
+ mov.l @(4,r9), r0
+ cmp/hs r1, r0
+ bt 19f
+
mov.l @(READERS_QUEUED,r8), r0
add #1, r0
mov.l r0, @(READERS_QUEUED,r8)
/* Futex call. */
mov r15, r7
mov #FUTEX_WAIT, r5
- mov #0, r6
+ mov.l @(READERS_WAKEUP,r8), r6
mov r8, r4
add #READERS_WAKEUP, r4
mov #SYS_futex, r3
bf 12f
13:
- mov #-ETIMEDOUT, r0
- cmp/eq r0, r3
- bt 18f
mov.l @(READERS_QUEUED,r8), r0
add #-1, r0
mov.l r0, @(READERS_QUEUED,r8)
- tst r0, r0
+ mov #-ETIMEDOUT, r0
+ cmp/eq r0, r3
bf 2b
- bra 2b
- mov.l r0, @(READERS_WAKEUP,r8)
+
+18:
+ bra 9f
+ mov #ETIMEDOUT, r3
5:
mov #0, r3
stc gbr, r1
mov.w .Ltcboff,r2
sub r2,r1
- mov.l @(8,r1),r1
cmp/eq r1, r0
bf 3b
/* Deadlock detected. */
bra 17b
mov #-ETIMEDOUT, r3
-18:
+19:
bra 9b
- mov #ETIMEDOUT, r3
+ mov #EINVAL, r3
.Ltcboff:
.word TLS_PRE_TCB_SIZE
tst r0, r0
bt 5f
3:
+ /* Check the value of the timeout parameter. */
+ mov.l .L1g1, r1
+ mov.l @(4,r9), r0
+ cmp/hs r1, r0
+ bt 19f
+
mov.l @(WRITERS_QUEUED,r8), r0
add #1, r0
mov.l r0, @(WRITERS_QUEUED,r8)
/* Futex call. */
mov r15, r7
mov #FUTEX_WAIT, r5
- mov #0, r6
+ mov.l @(WRITERS_WAKEUP,r8), r6
mov r8, r4
add #WRITERS_WAKEUP, r4
mov #SYS_futex, r3
bf 12f
13:
- mov #-ETIMEDOUT, r0
- cmp/eq r0, r3
- bt 18f
mov.l @(WRITERS_QUEUED,r8), r0
add #-1, r0
mov.l r0, @(WRITERS_QUEUED,r8)
- mov #0, r0
- bra 2b
- mov.l r0, @(WRITERS_WAKEUP,r8)
+ mov #-ETIMEDOUT, r0
+ cmp/eq r0, r3
+ bf 2b
+
+18:
+ bra 9f
+ mov #ETIMEDOUT, r3
+
+19:
+ bra 9f
+ mov #EINVAL, r3
5:
mov #0, r3
- stc gbr, r1
+ stc gbr, r0
mov.w .Ltcboff,r2
- sub r2,r1
- mov.l @(8,r1),r0
+ sub r2,r0
mov.l r0, @(WRITER,r8)
9:
#if MUTEX == 0
stc gbr, r1
mov.w .Ltcboff,r2
sub r2,r1
- mov.l @(8,r1),r1
cmp/eq r1, r0
bf 3b
bra 9b
bra 17b
mov #-ETIMEDOUT, r3
-18:
- bra 9b
- mov #ETIMEDOUT, r3
-
.Ltcboff:
.word TLS_PRE_TCB_SIZE
.align 2
5:
mov #0, r0
mov.l r0, @(WRITER,r8)
+ mov #1, r6
mov r8, r4
+ add #WRITERS_WAKEUP, r4
mov.l @(WRITERS_QUEUED,r8), r0
tst r0, r0
- bf 9f
+ bf 0f
+
+ /* If there are also no readers waiting, nothing to do. */
+ mov.l @(READERS_QUEUED,r8), r0
+ tst r0, r0
+ bt 6f
+
mov #-1, r6
shlr r6 /* r6 = 0x7fffffff */
- bra 0f
- add #READERS_WAKEUP, r4
-9:
- mov #1, r6
- add #WRITERS_WAKEUP, r4
+ mov r8, r4
+ add #READERS_WAKEUP, r4
+
0:
+ mov.l @r4, r0
+ add #1, r0
+ mov.l r0, @r4
+#if MUTEX == 0
+ DEC (@r8, r2)
+#else
+ DEC (@(MUTEX,r8), r2)
+#endif
+ tst r2, r2
+ bf 7f
+
+8:
mov #FUTEX_WAKE, r5
mov #SYS_futex, r3
+ mov #0, r7
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
+ lds.l @r15+, pr
+ mov.l @r15+, r8
+ mov.l @r15+, r12
+ rts
+ mov #0, r0
6:
#if MUTEX == 0
DEC (@r8, r2)
bra 4b
nop
+7:
+ mov.l r4, @-r15
+ mov.l r6, @-r15
+ mov r8, r4
+#if MUTEX != 0
+ add #MUTEX, r4
+#endif
+ mov.l .Lwake9, r1
+ bsrf r1
+ nop
+.Lwake9b:
+
+ mov.l @r15+, r6
+ bra 8b
+ mov.l @r15+, r4
+
.align 2
.Lwait8:
.long __lll_mutex_lock_wait-.Lwait8b
.Lwake8:
.long __lll_mutex_unlock_wake-.Lwake8b
+.Lwake9:
+ .long __lll_mutex_unlock_wake-.Lwake9b
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
.globl pthread_rwlock_unlock
mov r8, r4
add #WRITERS_WAKEUP, r4
mov #FUTEX_WAIT, r5
- mov #0, r6
+ mov.l @(WRITERS_WAKEUP,r8), r6
+ mov #0, r7
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
tst r2, r2
bf 12f
13:
- mov.l @(READERS_QUEUED,r8), r0
+ mov.l @(WRITERS_QUEUED,r8), r0
add #-1, r0
- mov.l r0, @(READERS_QUEUED,r8)
- mov #0, r0
bra 2b
- mov.l r0, @(WRITERS_WAKEUP,r8)
+ mov.l r0, @(WRITERS_QUEUED,r8)
5:
mov #0, r3
- stc gbr, r1
+ stc gbr, r0
mov.w .Ltcboff,r2
- sub r2,r1
- mov.l @(8,r1),r0
+ sub r2,r0
mov.l r0, @(WRITER,r8)
9:
#if MUTEX == 0
stc gbr, r1
mov.w .Ltcboff,r2
sub r2,r1
- mov.l @(8,r1),r1
cmp/eq r1, r0
bf 3b
bra 9b
mov #0, r0
1:
- mov #EAGAIN, r2
+ mov #EINVAL, r2
mova .Lgot3, r0
mov.l .Lgot3, r12
add r0, r12
.long __errno_location@PLT-(.Lerrloc3b+2-.)
#endif
.size __new_sem_post,.-__new_sem_post
- .symver __new_sem_post, sem_post@@GLIBC_2.2
+ versioned_symbol(libpthread, __new_sem_post, sem_post, GLIBC_2_1)
.long __errno_location@PLT-(.Lerrloc1b+2-.)
#endif
.size __new_sem_trywait,.-__new_sem_trywait
- .symver __new_sem_trywait, sem_trywait@@GLIBC_2.2
+ versioned_symbol(libpthread, __new_sem_trywait, sem_trywait, GLIBC_2_1)
.long __errno_location@PLT-(.Lerrloc0b+2-.)
#endif
.size __new_sem_wait,.-__new_sem_wait
- .symver __new_sem_wait, sem_wait@@GLIBC_2.2
+ versioned_symbol(libpthread, __new_sem_wait, sem_wait, GLIBC_2_1)
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
- ({ unsigned char __result; \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) __result; \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
mov r15,r1\n\
mov #-8,r15\n\
- 0: mov.b @%1,r2\n\
- cmp/eq r2,%3\n\
+ 0: mov.b @%1,%0\n\
+ cmp/eq %0,%3\n\
bf 1f\n\
mov.b %2,@%1\n\
- 1: mov r1,r15\n\
- mov #-1,%0\n\
- negc %0,%0"\
- : "=r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
- : "r0", "r1", "r2", "t", "memory"); \
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
+ : "r0", "r1", "t", "memory"); \
__result; })
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
- ({ unsigned char __result; \
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) __result; \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
mov r15,r1\n\
mov #-8,r15\n\
- 0: mov.w @%1,r2\n\
- cmp/eq r2,%3\n\
+ 0: mov.w @%1,%0\n\
+ cmp/eq %0,%3\n\
bf 1f\n\
mov.w %2,@%1\n\
- 1: mov r1,r15\n\
- mov #-1,%0\n\
- negc %0,%0"\
- : "=r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
- : "r0", "r1", "r2", "t", "memory"); \
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
+ : "r0", "r1", "t", "memory"); \
__result; })
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
- ({ unsigned char __result; \
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) __result; \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
mov r15,r1\n\
mov #-8,r15\n\
- 0: mov.l @%1,r2\n\
- cmp/eq r2,%3\n\
+ 0: mov.l @%1,%0\n\
+ cmp/eq %0,%3\n\
bf 1f\n\
mov.l %2,@%1\n\
- 1: mov r1,r15\n\
- mov #-1,%0\n\
- negc %0,%0"\
- : "=r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
- : "r0", "r1", "r2", "t", "memory"); \
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
+ : "r0", "r1", "t", "memory"); \
__result; })
/* XXX We do not really need 64-bit compare-and-exchange. At least
problems since not many other 32-bit architectures have support for
such an operation. So don't define any code for now. */
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
(abort (), 0)
#define atomic_exchange_and_add(mem, value) \
({ __typeof (*mem) __result; \
+ __typeof (value) __value; \
if (sizeof (*mem) == 1) \
__asm __volatile ("\
.align 2\n\
add %0,%1\n\
mov.b %1,@%2\n\
1: mov r1,r15"\
- : "=&r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=&r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "memory"); \
else if (sizeof (*mem) == 2) \
__asm __volatile ("\
add %0,%1\n\
mov.w %1,@%2\n\
1: mov r1,r15"\
- : "=&r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=&r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "memory"); \
else if (sizeof (*mem) == 4) \
__asm __volatile ("\
add %0,%1\n\
mov.l %1,@%2\n\
1: mov r1,r15"\
- : "=&r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=&r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "memory"); \
else \
{ \
__typeof (value) addval = (value); \
- __typeof (*mem) oldval; \
__typeof (mem) memp = (mem); \
do \
- __result = (oldval = *memp) + addval; \
- while (!__arch_compare_and_exchange_64_acq (memp, __result, oldval));\
+ __result = *memp; \
+ while (__arch_compare_and_exchange_val_64_acq \
+ (memp, __result + addval, __result) == __result); \
(void) addval; \
} \
__result; })
#define atomic_add(mem, value) \
- (void) ({ if (sizeof (*mem) == 1) \
+ (void) ({ __typeof (value) __value; \
+ if (sizeof (*mem) == 1) \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
add r2,%0\n\
mov.b %0,@%1\n\
1: mov r1,r15"\
- : "=&r" (value) : "r" (mem), "0" (value) \
+ : "=&r" (__value) : "r" (mem), "0" (value) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*mem) == 2) \
__asm __volatile ("\
add r2,%0\n\
mov.w %0,@%1\n\
1: mov r1,r15"\
- : "=&r" (value) : "r" (mem), "0" (value) \
+ : "=&r" (__value) : "r" (mem), "0" (value) \
: "r0", "r1", "r2", "memory"); \
else if (sizeof (*mem) == 4) \
__asm __volatile ("\
add r2,%0\n\
mov.l %0,@%1\n\
1: mov r1,r15"\
- : "=&r" (value) : "r" (mem), "0" (value) \
+ : "=&r" (__value) : "r" (mem), "0" (value) \
: "r0", "r1", "r2", "memory"); \
else \
{ \
__typeof (mem) memp = (mem); \
do \
oldval = *memp; \
- while (! __arch_compare_and_exchange_64_acq (memp, \
- oldval + addval, \
- oldval)); \
+ while (__arch_compare_and_exchange_val_64_acq \
+ (memp, oldval + addval, oldval) == oldval); \
(void) addval; \
} \
})
#define atomic_add_negative(mem, value) \
({ unsigned char __result; \
+ __typeof (value) __value; \
if (sizeof (*mem) == 1) \
__asm __volatile ("\
.align 2\n\
1: mov r1,r15\n\
shal %1\n\
movt %0"\
- : "=r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*mem) == 2) \
__asm __volatile ("\
1: mov r1,r15\n\
shal %1\n\
movt %0"\
- : "=r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*mem) == 4) \
__asm __volatile ("\
1: mov r1,r15\n\
shal %1\n\
movt %0"\
- : "=r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "r2", "t", "memory"); \
else \
abort (); \
#define atomic_add_zero(mem, value) \
({ unsigned char __result; \
+ __typeof (value) __value; \
if (sizeof (*mem) == 1) \
__asm __volatile ("\
.align 2\n\
1: mov r1,r15\n\
tst %1,%1\n\
movt %0"\
- : "=r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*mem) == 2) \
__asm __volatile ("\
1: mov r1,r15\n\
tst %1,%1\n\
movt %0"\
- : "=r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "r2", "t", "memory"); \
else if (sizeof (*mem) == 4) \
__asm __volatile ("\
1: mov r1,r15\n\
tst %1,%1\n\
movt %0"\
- : "=r" (__result), "=&r" (value) : "r" (mem), "1" (value) \
+ : "=r" (__result), "=&r" (__value) : "r" (mem), "1" (value) \
: "r0", "r1", "r2", "t", "memory"); \
else \
abort (); \