* check that we are profiling
* and that we aren't recursively invoked.
*/
- if (atomic_compare_and_exchange_acq (&p->state, GMON_PROF_BUSY,
- GMON_PROF_ON))
+ if (atomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
+ GMON_PROF_ON))
return;
  /* This function might be called from different places.  So better
     protect against multiple executions since these are fatal.  */
static long int already_called;
- if (! atomic_compare_and_exchange_acq (&already_called, 1, 0))
+ if (! atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
{
void * const *p;
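
Throughout this patch the renamed macros follow one convention: the `bool'
forms evaluate to zero when the exchange succeeded and to nonzero when the
word did not contain `oldval', while the `val' forms return whatever value
was actually found in memory.  A minimal illustrative sketch of both uses
(not part of the patch; run_once_action is a hypothetical helper):

  /* bool form: 0 on success, so `!' reads as "we won the race".  */
  static long int once;
  if (! atomic_compare_and_exchange_bool_acq (&once, 1, 0))
    run_once_action ();		/* hypothetical */

  /* val form: the exchange took place iff the returned value equals
     the expected old value.  */
  long int seen = atomic_compare_and_exchange_val_acq (&once, 2, 1);
  if (seen == 1)
    ;				/* exchange happened */
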
+2003-03-21  Ulrich Drepper  <drepper@redhat.com>
+
+ * cancellation.c: Adjust for new form of compare&exchange macros.
+ * cleanup_defer.c: Likewise.
+ * init.c: Likewise.
+ * libc-cancellation.c: Likewise.
+ * old_pthread_cond_broadcast.c: Likewise.
+ * old_pthread_cond_signal.c: Likewise.
+ * old_pthread_cond_timedwait.c: Likewise.
+ * old_pthread_cond_wait.c: Likewise.
+ * pthread_cancel.c: Likewise.
+ * pthread_create.c: Likewise.
+ * pthread_detach.c: Likewise.
+ * pthread_join.c: Likewise.
+ * pthread_key_delete.c: Likewise.
+ * pthread_setcancelstate.c: Likewise.
+ * pthread_setcanceltype.c: Likewise.
+ * pthread_timedjoin.c: Likewise.
+ * pthread_tryjoin.c: Likewise.
+ * sysdeps/pthread/createthread.c: Likewise.
+
2003-03-20  Ulrich Drepper  <drepper@redhat.com>

	* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Include <atomic.h>.
if (newval == oldval)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
if (newval == oldval)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
if (newval == oldval)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
break;
}
}
/* Disable asynchronous cancellation for now. */
if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
{
- while (atomic_compare_and_exchange_acq (&self->cancelhandling,
- cancelhandling
- & ~CANCELTYPE_BITMASK,
- cancelhandling) != 0)
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ cancelhandling
+ & ~CANCELTYPE_BITMASK,
+ cancelhandling))
cancelhandling = self->cancelhandling;
}
&& ((cancelhandling = THREAD_GETMEM (self, cancelhandling))
& CANCELTYPE_BITMASK) == 0)
{
- while (atomic_compare_and_exchange_acq (&self->cancelhandling,
- cancelhandling
- | CANCELTYPE_BITMASK,
- cancelhandling) != 0)
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ cancelhandling
+ | CANCELTYPE_BITMASK,
+ cancelhandling))
cancelhandling = self->cancelhandling;
CANCELLATION_P (self);
/* Already canceled or exiting. */
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
/* Set the return value. */
THREAD_SETMEM (self, result, PTHREAD_CANCELED);
{
struct pthread *self = THREAD_SELF;
int oldval;
+ int newval;
- while (1)
+ do
{
oldval = THREAD_GETMEM (self, cancelhandling);
- int newval = oldval | CANCELTYPE_BITMASK;
+ newval = oldval | CANCELTYPE_BITMASK;
if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
{
if ((oldval & EXITING_BITMASK) != 0)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
/* Somebody else modified the word, try again. */
continue;
/* NOTREACHED */
}
-
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
- break;
}
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval));
return oldval;
}
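
The hunks above and below replace open-coded while (1)/break loops with the
canonical compare-and-exchange retry shape: recompute newval from a fresh
read of the word, then loop while the exchange reports failure.  A
self-contained sketch of the idiom, using GCC's
__sync_bool_compare_and_swap as a stand-in for the glibc macro (note the
builtin returns nonzero on success and takes (oldval, newval) in the
opposite order):

  /* Atomically set `bit' in *word; returns the previous value.
     Illustrative stand-in, not glibc code.  */
  static int
  set_flag_bit (int *word, int bit)
  {
    int oldval, newval;
    do
      {
	oldval = *word;		/* fresh read on every retry */
	newval = oldval | bit;
      }
    while (! __sync_bool_compare_and_swap (word, oldval, newval));
    return oldval;
  }
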
return;
struct pthread *self = THREAD_SELF;
+ int oldval;
+ int newval;
- while (1)
+ do
{
- int oldval = THREAD_GETMEM (self, cancelhandling);
- int newval = oldval & ~CANCELTYPE_BITMASK;
+ oldval = THREAD_GETMEM (self, cancelhandling);
+ newval = oldval & ~CANCELTYPE_BITMASK;
if (newval == oldval)
break;
-
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
- break;
}
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling, newval,
+ oldval));
}
#endif
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
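
All four old_pthread_cond_* hunks are instances of one lazy-initialization
idiom: allocate and initialize a fresh object, publish it with a
compare-and-exchange against NULL, and free it again if another thread
published first.  A stand-alone sketch under assumed names (struct condvar
and condvar_init are hypothetical; error handling omitted):

  #include <stdlib.h>

  struct condvar { int value; };		/* hypothetical */
  extern void condvar_init (struct condvar *);	/* hypothetical */

  static struct condvar *shared;

  static void
  get_cond (void)
  {
    struct condvar *newcond = malloc (sizeof *newcond);
    condvar_init (newcond);
    /* The builtin returns nonzero on success; failure means another
       thread installed its object first.  */
    if (! __sync_bool_compare_and_swap (&shared, NULL, newcond))
      free (newcond);
  }
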
return ESRCH;
int result = 0;
- while (1)
+ int oldval;
+ int newval;
+ do
{
- int oldval = pd->cancelhandling;
- int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
+ oldval = pd->cancelhandling;
+ newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
/* Avoid doing unnecessary work.  The atomic operation can
potentially be expensive if the bus has to be locked and
remote cache lines have to be invalidated.  */
break;
}
-
- /* Mark the thread as canceled. This has to be done
- atomically since other bits could be modified as well. */
- if (atomic_compare_and_exchange_acq (&pd->cancelhandling, newval,
- oldval) == 0)
- break;
}
+ /* Mark the thread as canceled. This has to be done
+ atomically since other bits could be modified as well. */
+ while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
+ oldval));
return result;
}
do
pd->nextevent = __nptl_last_event;
- while (atomic_compare_and_exchange_acq (&__nptl_last_event, pd,
- pd->nextevent) != 0);
+ while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
+ pd, pd->nextevent));
}
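
The enqueue loop just rewritten is the standard lock-free list prepend:
snapshot the current head into the new node's link, then
compare-and-exchange the head from that snapshot to the new node, retrying
whenever another thread moved the head in between.  Sketched with a GCC
builtin and an illustrative node type:

  struct node { struct node *next; };

  static void
  push (struct node **head, struct node *newp)
  {
    do
      newp->next = *head;		/* snapshot the head */
    while (! __sync_bool_compare_and_swap (head, newp->next, newp));
  }
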
/* Now call the function to signal the event. */
int result = 0;
/* Mark the thread as detached. */
- if (atomic_compare_and_exchange_acq (&pd->joinid, pd, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&pd->joinid, pd, NULL))
{
/* There are two possibilities here. First, the thread might
already be detached. In this case we return EINVAL.
/* Wait for the thread to finish. If it is already locked something
is wrong. There can only be one waiter. */
- if (__builtin_expect (atomic_compare_and_exchange_acq (&pd->joinid, self,
- NULL) != 0, 0))
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
+ self,
+ NULL), 0))
/* There is already somebody waiting for the thread. */
return EINVAL;
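
The joinid word acts as a one-slot state machine: NULL means no waiter and
not detached, the descriptor's own address (see the pthread_detach hunk
above) means detached, and any other value is the single joining thread.
Both transitions start from NULL, which is why a failed exchange maps
straight to EINVAL.  Compressed illustration (assumes the patch's
0-on-success convention):

  /* Claim the single waiter slot; fails if the thread is detached
     or someone is already joining.  */
  if (atomic_compare_and_exchange_bool_acq (&pd->joinid, self, NULL))
    return EINVAL;
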
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
unsigned int seq = __pthread_keys[key].seq;
if (__builtin_expect (! KEY_UNUSED (seq), 1)
- && atomic_compare_and_exchange_acq (&__pthread_keys[key].seq,
- seq + 1, seq) == 0)
+ && ! atomic_compare_and_exchange_bool_acq (&__pthread_keys[key].seq,
+ seq + 1, seq))
/* We deleted a valid key. */
result = 0;
}
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
/* Update the cancel handling word. This has to be done
atomically since other bits could be modified as well. */
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
__do_cancel ();
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
/* Update the cancel handling word. This has to be done
atomically since other bits could be modified as well. */
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
/* Wait for the thread to finish. If it is already locked something
is wrong. There can only be one waiter. */
- if (__builtin_expect (atomic_compare_and_exchange_acq (&pd->joinid, self,
- NULL) != 0, 0))
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
+ self, NULL), 0))
/* There is already somebody waiting for the thread. */
return EINVAL;
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
/* Wait for the thread to finish. If it is already locked something
is wrong. There can only be one waiter. */
- if (atomic_compare_and_exchange_acq (&pd->joinid, self, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&pd->joinid, self, NULL))
/* There is already somebody waiting for the thread. */
return EINVAL;
/* Enqueue the descriptor. */
do
pd->nextevent = __nptl_last_event;
- while (atomic_compare_and_exchange_acq (&__nptl_last_event, pd,
- pd->nextevent) != 0);
+ while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event, pd,
+ pd->nextevent));
/* Now call the function which signals the event. */
__nptl_create_event ();
/* Put the new entry in the first position. */
do
newp->next = table->array[hash];
- while (atomic_compare_and_exchange_acq (&table->array[hash], newp,
- newp->next));
+ while (atomic_compare_and_exchange_bool_acq (&table->array[hash], newp,
+ newp->next));
/* Update the statistics. */
if (data == (void *) -1)
for (f = &funcs->fns[funcs->idx - 1]; f >= &funcs->fns[0]; --f)
if ((d == NULL || d == f->func.cxa.dso_handle)
/* We don't want to run this cleanup more than once. */
- && (atomic_compare_and_exchange_acq (&f->flavor, ef_free, ef_cxa)
- == 0))
+ && ! atomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
+ ef_cxa))
(*f->func.cxa.fn) (f->func.cxa.arg, 0);
}
#endif
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK "cmpxchgb %b2, %1; setne %0" \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchgb %b2, %1" \
: "=a" (ret), "=m" (*mem) \
: "q" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK "cmpxchgw %w2, %1; setne %0" \
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchgw %w2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK "cmpxchgl %2, %1; setne %0" \
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
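
With the i386 primitives now returning the old value in %eax rather than a
setne flag, a boolean form can be layered on top generically: the exchange
succeeded exactly when the value found in memory equals the expected one.
One way such a derivation can look (an assumption about the generic layer,
not a quote from it):

  #define my_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
    ({ __typeof (*(mem)) __found \
	 = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
       __found != (oldval); })	/* nonzero == failure, per convention */
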
really going to be used.  The code below can be used on Intel Pentium
and later, but NOT on i486. */
#if 1
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
(abort (), 0)
#else
# ifdef __PIC__
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- int ignore; \
- __asm __volatile ("xchgl %3, %%ebx\n\t" \
- LOCK "cmpxchg8b %2, %1\n\t" \
- "setne %0\n\t" \
- "xchgl %3, %%ebx" \
- : "=a" (ret), "=m" (*mem), "=d" (ignore) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile ("xchgl %2, %%ebx\n\t" \
+ LOCK "cmpxchg8b %1\n\t" \
+ "xchgl %2, %%ebx" \
+ : "=A" (ret), "=m" (*mem) \
: "DS" (((unsigned long long int) (newval)) \
& 0xffffffff), \
"c" (((unsigned long long int) (newval)) >> 32), \
- "1" (*mem), "0" (((unsigned long long int) (oldval)) \
+ "1" (*mem), "a" (((unsigned long long int) (oldval)) \
& 0xffffffff), \
- "2" (((unsigned long long int) (oldval)) >> 32)); \
+ "d" (((unsigned long long int) (oldval)) >> 32)); \
ret; })
# else
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- int ignore; \
- __asm __volatile (LOCK "cmpxchg8b %2, %1; setne %0" \
- : "=a" (ret), "=m" (*mem), "=d" (ignore) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchg8b %1" \
+ : "=A" (ret), "=m" (*mem) \
: "b" (((unsigned long long int) (newval)) \
& 0xffffffff), \
"c" (((unsigned long long int) (newval)) >> 32), \
- "1" (*mem), "0" (((unsigned long long int) (oldval)) \
+ "1" (*mem), "a" (((unsigned long long int) (oldval)) \
& 0xffffffff), \
- "2" (((unsigned long long int) (oldval)) >> 32)); \
+ "d" (((unsigned long long int) (oldval)) >> 32)); \
ret; })
# endif
#endif
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
(!__sync_bool_compare_and_swap_si ((int *) (mem), (int) (long) (oldval), \
(int) (long) (newval)))
-#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(!__sync_bool_compare_and_swap_di ((long *) (mem), (long) (oldval), \
(long) (newval)))
-#define __arch_compare_and_exchange_32_val_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ (abort (), 0)
+
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ (abort (), 0)
+
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
__sync_val_compare_and_swap_si ((int *) (mem), (int) (long) (oldval), \
(int) (long) (newval))
-#define __arch_compare_and_exchange_64_val_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
__sync_val_compare_and_swap_di ((long *) (mem), (long) (oldval), \
(long) (newval))
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
(abort (), 0)
#ifdef UP
* XXX this may not work properly on 64-bit if the register
* containing oldval has the high half non-zero for some reason.
*/
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
-({ \
- unsigned int __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: lwarx %0,0,%1\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "r" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "r" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
})
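
Unlike x86's single cmpxchg, the PowerPC version is a
load-reserved/store-conditional loop: lwarx loads the word and sets a
reservation, subf. compares by subtraction, stwcx. stores only if the
reservation is still intact, and bne- loops when it was lost.  In rough C,
with load_reserved and store_conditional as fictional placeholders (LL/SC
cannot be expressed in portable C):

  static int
  cas_bool_32 (unsigned int *mem, unsigned int newval, unsigned int oldval)
  {
    unsigned int tmp;
    for (;;)
      {
	tmp = load_reserved (mem);		/* lwarx */
	if (tmp != oldval)			/* subf.; bne 2f */
	  return 1;				/* nonzero == failure */
	if (store_conditional (mem, newval))	/* stwcx. */
	  return 0;
	/* reservation lost (bne- 1b): retry */
      }
  }
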
#ifdef __powerpc64__
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval)\
-({ \
- unsigned long __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%1\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stdcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "r" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+({ \
+ unsigned long __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: ldarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stdcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "r" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
})
#else /* powerpc32 */
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(abort (), 0)
#endif
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ unsigned int *__mem = (unsigned int *) (mem); \
unsigned int __old = (unsigned int) (oldval); \
unsigned int __cmp = __old; \
__cmp != __old; })
#ifdef __s390x__
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ unsigned long int *__mem = (unsigned long int *) (mem); \
unsigned long int __old = (unsigned long int) (oldval); \
unsigned long int __cmp = __old; \
/* For 31 bit we do not really need 64-bit compare-and-exchange. We can
implement them by use of the cds instruction. The straightforward
implementation causes warnings so we skip the definition for now. */
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(abort (), 0)
#endif
/* Now store the copied value. But do it atomically. */
assert (sizeof (long int) == sizeof (void *__unbounded));
- if (atomic_compare_and_exchange_acq (&mount_proc, copy_result, NULL) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&mount_proc, copy_result, NULL))
/* Replacing the value failed. This means another thread was
faster and we don't need the copy anymore. */
free (copy_result);
#endif
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgb %b2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "q" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgw %w2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgq %q2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \