#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
+#ifdef __native_client__
+ /* The mask instruction should immediately precede the access instruction. */
+# define AO_MASK_PTR(reg) " bical " reg ", " reg ", #0xc0000000\n"
+# define AO_BR_ALIGN " .align 4\n"
+#else
+# define AO_MASK_PTR(reg) /* empty */
+# define AO_BR_ALIGN /* empty */
+#endif
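+
+/* Illustration only (not compiled): with __native_client__ defined, an */
+/* exclusive-access loop such as the one in AO_store expands to roughly */
+/*        .align 4 */
+/*   1:   bical   r2, r2, #0xc0000000 */
+/*        ldrex   r0, [r2] */
+/*        bical   r2, r2, #0xc0000000 */
+/*        strex   r0, r3, [r2] */
+/*        teq     r0, #0 */
+/*        bne     1b */
+/* where "bical" (BIC with the AL condition) clears the top two address */
+/* bits so the access stays inside the NaCl sandbox, and "strex" writes */
+/* 0 to its status operand on success (1 if the exclusive reservation */
+/* was lost), so the teq/bne pair retries the loop on failure.  The */
+/* register names above are illustrative only; the real operands are */
+/* substituted by the compiler. */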
+
#if defined(__thumb__) && !defined(__thumb2__)
/* Thumb One mode does not have ARM "mcr", "swp" and some load/store */
/* instructions, so we temporarily switch to ARM mode and go back */
/* afterwards (clobbering "r3" register). */
" bx r3\n" \
" .align\n" \
" .arm\n" \
+ AO_BR_ALIGN \
"4:\n"
# define AO_THUMB_RESTORE_MODE \
" adr r3, 5f + 1\n" \
" bx r3\n" \
" .thumb\n" \
+ AO_BR_ALIGN \
"5:\n"
# define AO_THUMB_SWITCH_CLOBBERS "r3",
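+
+/* For illustration: each inline-asm block below that uses these macros */
+/* is bracketed as */
+/*   AO_THUMB_GO_ARM        @ adr/bx to label 4, then assemble as ARM */
+/*   ... ldrex/strex (or swp) sequence executed in ARM state ... */
+/*   AO_THUMB_RESTORE_MODE  @ adr/bx to "5f + 1" (Thumb bit), ".thumb" */
+/* and names "r3" in its clobber list via AO_THUMB_SWITCH_CLOBBERS. */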
#else
__asm__ __volatile__("@AO_store\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%2]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%2")
+ " ldrex %0, [%2]\n"
+ AO_MASK_PTR("%2")
" strex %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_char_store\n"
AO_THUMB_GO_ARM
- "1: ldrexb %0, [%2]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%2")
+ " ldrexb %0, [%2]\n"
+ AO_MASK_PTR("%2")
" strexb %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_short_store\n"
AO_THUMB_GO_ARM
- "1: ldrexh %0, [%2]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%2")
+ " ldrexh %0, [%2]\n"
+ AO_MASK_PTR("%2")
" strexh %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_test_and_set\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%3]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%3")
+ " ldrex %0, [%3]\n"
+ AO_MASK_PTR("%3")
" strex %1, %4, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_fetch_and_add\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%5]\n" /* get original */
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%5")
+ " ldrex %0, [%5]\n" /* get original */
" add %2, %0, %4\n" /* sum up in incr */
+ AO_MASK_PTR("%5")
" strex %1, %2, [%5]\n" /* store them */
" teq %1, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_fetch_and_add1\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%4]\n" /* get original */
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%4")
+ " ldrex %0, [%4]\n" /* get original */
" add %1, %0, #1\n" /* increment */
+ AO_MASK_PTR("%4")
" strex %2, %1, [%4]\n" /* store them */
" teq %2, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_fetch_and_sub1\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%4]\n" /* get original */
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%4")
+ " ldrex %0, [%4]\n" /* get original */
" sub %1, %0, #1\n" /* decrement */
+ AO_MASK_PTR("%4")
" strex %2, %1, [%4]\n" /* store them */
" teq %2, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_and\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%4]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%4")
+ " ldrex %0, [%4]\n"
" and %1, %0, %3\n"
+ AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_or\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%4]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%4")
+ " ldrex %0, [%4]\n"
" orr %1, %0, %3\n"
+ AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_xor\n"
AO_THUMB_GO_ARM
- "1: ldrex %0, [%4]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%4")
+ " ldrex %0, [%4]\n"
" eor %1, %0, %3\n"
+ AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_char_fetch_and_add\n"
AO_THUMB_GO_ARM
- "1: ldrexb %0, [%5]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%5")
+ " ldrexb %0, [%5]\n"
" add %2, %0, %4\n"
+ AO_MASK_PTR("%5")
" strexb %1, %2, [%5]\n"
" teq %1, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_short_fetch_and_add\n"
AO_THUMB_GO_ARM
- "1: ldrexh %0, [%5]\n"
+ AO_BR_ALIGN
+ "1: " AO_MASK_PTR("%5")
+ " ldrexh %0, [%5]\n"
" add %2, %0, %4\n"
+ AO_MASK_PTR("%5")
" strexh %1, %2, [%5]\n"
" teq %1, #0\n"
" bne 1b\n"
__asm__ __volatile__("@AO_compare_and_swap\n"
AO_THUMB_GO_ARM
+ AO_BR_ALIGN
"1: mov %0, #2\n" /* store a flag */
+ AO_MASK_PTR("%3")
" ldrex %1, [%3]\n" /* get original */
" teq %1, %4\n" /* see if match */
+ AO_MASK_PTR("%3")
# ifdef __thumb2__
/* TODO: Eliminate warning: it blocks containing wide Thumb */
/* instructions are deprecated in ARMv8. */
__asm__ __volatile__("@AO_fetch_compare_and_swap\n"
AO_THUMB_GO_ARM
+ AO_BR_ALIGN
"1: mov %0, #2\n" /* store a flag */
+ AO_MASK_PTR("%3")
" ldrex %1, [%3]\n" /* get original */
" teq %1, %4\n" /* see if match */
+ AO_MASK_PTR("%3")
# ifdef __thumb2__
" it eq\n"
# endif
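+ /* The IT block makes the conditional "strexeq" that follows legal */
+ /* in Thumb-2; in ARM state the condition is encoded in the */
+ /* instruction itself, so the IT is needed only for Thumb-2. */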
/* AO_THUMB_GO_ARM is empty. */
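+ /* "ldrexd" transfers a doubleword through a register pair; "%H0" */
+ /* expands to the second (highest-numbered) register of the pair */
+ /* that GCC allocates for the 64-bit result. */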
__asm__ __volatile__("@AO_double_load\n"
+ AO_MASK_PTR("%1")
" ldrexd %0, %H0, [%1]"
: "=&r" (result.AO_whole)
: "r" (addr)
do {
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_store\n"
+ AO_MASK_PTR("%3")
" ldrexd %0, %H0, [%3]\n"
+ AO_MASK_PTR("%3")
" strexd %1, %4, %H4, [%3]"
: "=&r" (old_val.AO_whole), "=&r" (status), "+m" (*addr)
: "r" (addr), "r" (new_val.AO_whole)
do {
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_compare_and_swap\n"
+ AO_MASK_PTR("%1")
" ldrexd %0, %H0, [%1]\n" /* get original to r1 & r2 */
: "=&r"(tmp)
: "r"(addr)
if (tmp != old_val.AO_whole)
break;
__asm__ __volatile__(
+ AO_MASK_PTR("%2")
" strexd %0, %3, %H3, [%2]\n" /* store new one if matched */
: "=&r"(result), "+m"(*addr)
: "r" (addr), "r" (new_val.AO_whole)
__asm__ __volatile__("@AO_test_and_set_full\n"
AO_THUMB_GO_ARM
+ AO_MASK_PTR("%3")
" swp %0, %2, [%3]\n"
/* Ignore GCC "SWP is deprecated for this architecture" */
/* warning here (for ARMv6+). */
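+ /* "swp" atomically exchanges register %2 with the word at [%3] and */
+ /* returns the previous memory contents in %0, which is exactly the */
+ /* test-and-set behavior needed here. */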