From 07ee0a78c382f92ab550df421f1c01ff87e5848b Mon Sep 17 00:00:00 2001
From: Ivan Maidanski
Date: Thu, 14 Feb 2013 20:34:05 +0400
Subject: [PATCH] Implement AO_and/or/xor efficiently for ARM

* src/atomic_ops/sysdeps/gcc/arm.h (AO_and, AO_or, AO_xor): Implement
directly using LDREX/STREX (enabled only if not AO_PREFER_GENERALIZED).
---
 src/atomic_ops/sysdeps/gcc/arm.h | 57 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/src/atomic_ops/sysdeps/gcc/arm.h b/src/atomic_ops/sysdeps/gcc/arm.h
index 95d2f6b..e3a6bf8 100644
--- a/src/atomic_ops/sysdeps/gcc/arm.h
+++ b/src/atomic_ops/sysdeps/gcc/arm.h
@@ -324,6 +324,63 @@ AO_fetch_and_sub1(volatile AO_t *p)
   return result;
 }
 #define AO_HAVE_fetch_and_sub1
+
+AO_INLINE void
+AO_and(volatile AO_t *p, AO_t value)
+{
+  AO_t tmp, result;
+
+  __asm__ __volatile__("@AO_and\n"
+    AO_THUMB_GO_ARM
+    "1:     ldrex   %0, [%4]\n"
+    "       and     %1, %0, %3\n"
+    "       strex   %0, %1, [%4]\n"
+    "       teq     %0, #0\n"
+    "       bne     1b\n"
+    AO_THUMB_RESTORE_MODE
+    : "=&r" (tmp), "=&r" (result), "+m" (*p)
+    : "r" (value), "r" (p)
+    : AO_THUMB_SWITCH_CLOBBERS "cc");
+}
+#define AO_HAVE_and
+
+AO_INLINE void
+AO_or(volatile AO_t *p, AO_t value)
+{
+  AO_t tmp, result;
+
+  __asm__ __volatile__("@AO_or\n"
+    AO_THUMB_GO_ARM
+    "1:     ldrex   %0, [%4]\n"
+    "       orr     %1, %0, %3\n"
+    "       strex   %0, %1, [%4]\n"
+    "       teq     %0, #0\n"
+    "       bne     1b\n"
+    AO_THUMB_RESTORE_MODE
+    : "=&r" (tmp), "=&r" (result), "+m" (*p)
+    : "r" (value), "r" (p)
+    : AO_THUMB_SWITCH_CLOBBERS "cc");
+}
+#define AO_HAVE_or
+
+AO_INLINE void
+AO_xor(volatile AO_t *p, AO_t value)
+{
+  AO_t tmp, result;
+
+  __asm__ __volatile__("@AO_xor\n"
+    AO_THUMB_GO_ARM
+    "1:     ldrex   %0, [%4]\n"
+    "       eor     %1, %0, %3\n"
+    "       strex   %0, %1, [%4]\n"
+    "       teq     %0, #0\n"
+    "       bne     1b\n"
+    AO_THUMB_RESTORE_MODE
+    : "=&r" (tmp), "=&r" (result), "+m" (*p)
+    : "r" (value), "r" (p)
+    : AO_THUMB_SWITCH_CLOBBERS "cc");
+}
+#define AO_HAVE_xor
 #endif /* !AO_PREFER_GENERALIZED */
 
 #ifdef AO_ARM_HAVE_LDREXBH
-- 
2.7.4
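
For illustration only (not part of the patch above): each new primitive is a
LDREX/STREX retry loop that reloads the word and repeats the read-modify-write
whenever the store-exclusive fails, giving an atomic bitwise update without a
separate compare-and-swap.  A minimal usage sketch in C, assuming libatomic_ops
is installed; the FLAG_* bit names and the file name usage_sketch.c are
hypothetical:

  /* usage_sketch.c -- illustrative only, not from the patch */
  #include <assert.h>
  #include "atomic_ops.h"

  #define FLAG_READY ((AO_t)0x1)   /* hypothetical flag bits */
  #define FLAG_BUSY  ((AO_t)0x2)

  static volatile AO_t flags = 0;

  int main(void)
  {
    AO_or(&flags, FLAG_READY | FLAG_BUSY);   /* atomically set both bits */
    AO_and(&flags, ~FLAG_BUSY);              /* atomically clear BUSY */
    AO_xor(&flags, FLAG_READY);              /* atomically toggle READY off */
    assert(AO_load(&flags) == 0);            /* all bits cleared again */
    return 0;
  }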