+2003-02-06 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Remove wrong
+ but inactive generalization.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
+ Minor optimization: remove one instruction.
+ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise.
+
2003-02-04 Martin Schwidefsky <schwidefsky@de.ibm.com>
* sysdeps/unix/sysv/linux/s390/fork.c: Correct order of parameters.
pushl %ebx
movl 12(%esp), %ebx
-#if cond_lock != 0
- addl $cond_lock, %ebx
-#endif
/* Get internal lock. */
movl $1, %eax
movl %eax, wakeup_seq+4(%ebx)
/* Wake up all threads. */
- addl $wakeup_seq-cond_lock, %ebx
+ addl $wakeup_seq, %ebx
movl $FUTEX_WAKE, %ecx
xorl %esi, %esi
movl $SYS_futex, %eax
movl $0x7fffffff, %edx
ENTER_KERNEL
- subl $wakeup_seq-cond_lock, %ebx
-
/* Unlock. */
4: LOCK
-#if cond_lock == 0
- decl (%ebx)
-#else
- decl cond_lock(%ebx)
-#endif
+ decl cond_lock-wakeup_seq(%ebx)
jne 5f
6: xorl %eax, %eax
/* Unlock in loop requires waekup. */
5:
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
-#endif
+ leal cond_lock-wakeup_seq(%ebx), %eax
call __lll_mutex_unlock_wake
jmp 6b
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
pushl %esi
pushl %ebx
-#if cond_lock != 0
- addl $cond_lock, %ebx
-#endif
movl 12(%esp), %ebx
adcl $0, wakeup_seq+4(%ebx)
/* Wake up one thread. */
- addl $wakeup_seq-cond_lock, %ebx
+ addl $wakeup_seq, %ebx
movl $FUTEX_WAKE, %ecx
xorl %esi, %esi
movl $SYS_futex, %eax
movl %ecx, %edx /* movl $1, %edx */
ENTER_KERNEL
- subl $wakeup_seq-cond_lock, %ebx
-
/* Unlock. */
4: LOCK
-#if cond_lock == 0
- decl (%ebx)
-#else
- decl cond_lock(%ebx)
-#endif
+ decl cond_lock-wakeup_seq(%ebx)
jne 5f
6: xorl %eax, %eax
/* Unlock in loop requires waekup. */
5:
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
-#endif
+ leal cond_lock-wakeup_seq(%ebx), %eax
call __lll_mutex_unlock_wake
jmp 6b
.size __pthread_cond_signal, .-__pthread_cond_signal
movl 20(%esp), %ebx
movl 28(%esp), %ebp
-#if cond_lock != 0
- addl $cond_lock, %ebx
-#endif
/* Get internal lock. */
movl $1, %eax
leal 4(%esp), %esi
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
movl %edi, %edx
- addl $wakeup_seq-cond_lock, %ebx
+ addl $wakeup_seq, %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- subl $wakeup_seq-cond_lock, %ebx
+ subl $wakeup_seq, %ebx
movl %eax, %esi
call __pthread_disable_asynccancel
__condvar_cleanup:
pushl %ebx
movl 8(%esp), %ebx
-#if cond_lock != 0
- addl $cond_lock, %ebx
-#endif
/* Get internal lock. */
movl $1, %eax
adcl $0, woken_seq+4(%ebx)
LOCK
+#if cond_lock == 0
decl (%ebx)
+#else
+ decl cond_lock(%ebx)
+#endif
je 2f
#if cond_lock == 0
movl %ebx, %eax
xorl %esi, %esi
movl 16(%esp), %ebx
-#if cond_lock != 0
- addl $cond_lock, %ebx
-#endif
/* Get internal lock. */
movl $1, %eax
movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
movl %edi, %edx
- addl $wakeup_seq-cond_lock, %ebx
+ addl $wakeup_seq, %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- subl $wakeup_seq-cond_lock, %ebx
+ subl $wakeup_seq, %ebx
call __pthread_disable_asynccancel