From a2f1bd9ce980a18a422464cfb3b2eb75be1de93f Mon Sep 17 00:00:00 2001
From: Liviu Dudau <Liviu.Dudau@arm.com>
Date: Fri, 24 Oct 2014 15:32:02 +0100
Subject: [PATCH] Revert "The arm64 architecture has the ability to
 exclusively load and store"

Steve Capper has posted a v2 of the patch that gives better path
performance.

This reverts commit 64b57f9219ea2f251c4c3c43c3b36187287d7f9d.

Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
---
 arch/arm64/Kconfig               |  2 -
 arch/arm64/include/asm/cmpxchg.h | 77 --------------------------------
 2 files changed, 79 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ca9130851f8..c52894e6130 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -29,13 +29,11 @@ config ARM64
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
-	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_C_RECORDMCOUNT
-	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ad3d7d1f842..57c0fa7bf71 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,7 +19,6 @@
 #define __ASM_CMPXCHG_H
 
 #include <linux/bug.h>
-#include <linux/mmdebug.h>
 
 #include <asm/barrier.h>
 
@@ -148,51 +147,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return oldval;
 }
 
-#define system_has_cmpxchg_double()	1
-
-static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
-		unsigned long old1, unsigned long old2,
-		unsigned long new1, unsigned long new2, int size)
-{
-	unsigned long loop, lost;
-
-	switch (size) {
-	case 8:
-		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
-		do {
-			asm volatile("// __cmpxchg_double8\n"
-			"	ldxp	%0, %1, %2\n"
-			"	eor	%0, %0, %3\n"
-			"	eor	%1, %1, %4\n"
-			"	orr	%1, %0, %1\n"
-			"	mov	%w0, #0\n"
-			"	cbnz	%1, 1f\n"
-			"	stxp	%w0, %5, %6, %2\n"
-			"1:\n"
-			: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
-			: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
-		} while (loop);
-		break;
-	default:
-		BUILD_BUG();
-	}
-
-	return !lost;
-}
-
-static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
-		unsigned long old1, unsigned long old2,
-		unsigned long new1, unsigned long new2, int size)
-{
-	int ret;
-
-	smp_mb();
-	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
-	smp_mb();
-
-	return ret;
-}
-
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 					 unsigned long new, int size)
 {
@@ -214,15 +168,6 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
-	__ret; \
-})
-
 #define cmpxchg_local(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
@@ -232,28 +177,6 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	unsigned long flags;\
-	raw_local_irq_save(flags);\
-	__ret = cmpxchg_double(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-				o1, o2, n1, n2); \
-	raw_local_irq_restore(flags); \
-	__ret; \
-})
-
-#define this_cpu_cmpxchg_double_4(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	unsigned long flags;\
-	raw_local_irq_save(flags);\
-	__ret = cmpxchg_double(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-				o1, o2, n1, n2); \
-	raw_local_irq_restore(flags); \
-	__ret; \
-})
-
 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
 
-- 
2.34.1