ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
author Will Deacon <will.deacon@arm.com>
Mon, 12 Aug 2013 17:04:05 +0000 (18:04 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Mar 2014 05:30:13 +0000 (21:30 -0800)
commit 00efaa0250939dc148e2d3104fb3c18395d24a2d upstream.

Commit 15e7e5c1ebf5 ("ARM: 7749/1: spinlock: retry trylock operation if
strex fails on free lock") modified our arch_spin_trylock to retry the
acquisition if the lock appeared uncontended, but the strex failed.

This patch does the same for rwlocks, which were missed by the original
patch.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Li Zefan <lizefan@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
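
For illustration only (not part of the patch), here is a minimal C sketch
of the same retry idea, built on GCC's __atomic_compare_exchange_n with
the weak flag set: a weak compare-and-swap, like strex, is permitted to
fail spuriously, so a trylock must distinguish that case from genuine
contention and retry. The 0x80000000 writer bit mirrors the kernel code
below; sketch_write_trylock itself is a hypothetical name.

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical sketch, not kernel code: retry a weak CAS (which,
	 * like strex, may fail spuriously) while the lock still looks free. */
	static inline int sketch_write_trylock(uint32_t *lock)
	{
		uint32_t expected;

		do {
			expected = 0;	/* the lock appears uncontended */
			if (__atomic_compare_exchange_n(lock, &expected,
							0x80000000,
							true,	/* weak: may fail spuriously */
							__ATOMIC_ACQUIRE,
							__ATOMIC_RELAXED))
				return 1;	/* acquired for write */
		} while (expected == 0);	/* spurious failure on a free lock: retry */

		return 0;	/* genuinely contended */
	}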
arch/arm/include/asm/spinlock.h

index f8b8965..dd64cc6 100644
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-       unsigned long tmp;
+       unsigned long contended, res;
 
-       __asm__ __volatile__(
-"      ldrex   %0, [%1]\n"
-"      teq     %0, #0\n"
-"      strexeq %0, %2, [%1]"
-       : "=&r" (tmp)
-       : "r" (&rw->lock), "r" (0x80000000)
-       : "cc");
+       do {
+               __asm__ __volatile__(
+               "       ldrex   %0, [%2]\n"
+               "       mov     %1, #0\n"
+               "       teq     %0, #0\n"
+               "       strexeq %1, %3, [%2]"
+               : "=&r" (contended), "=&r" (res)
+               : "r" (&rw->lock), "r" (0x80000000)
+               : "cc");
+       } while (res);
 
-       if (tmp == 0) {
+       if (!contended) {
                smp_mb();
                return 1;
        } else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-       unsigned long tmp, tmp2 = 1;
+       unsigned long contended, res;
 
-       __asm__ __volatile__(
-"      ldrex   %0, [%2]\n"
-"      adds    %0, %0, #1\n"
-"      strexpl %1, %0, [%2]\n"
-       : "=&r" (tmp), "+r" (tmp2)
-       : "r" (&rw->lock)
-       : "cc");
+       do {
+               __asm__ __volatile__(
+               "       ldrex   %0, [%2]\n"
+               "       mov     %1, #0\n"
+               "       adds    %0, %0, #1\n"
+               "       strexpl %1, %0, [%2]"
+               : "=&r" (contended), "=&r" (res)
+               : "r" (&rw->lock)
+               : "cc");
+       } while (res);
 
-       smp_mb();
-       return tmp2 == 0;
+       /* If the lock is negative, then it is already held for write. */
+       if (contended < 0x80000000) {
+               smp_mb();
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /* read_can_lock - would read_trylock() succeed? */
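
A read-side analogue of the earlier sketch (again illustrative, with the
same includes assumed, not the patch itself): readers increment the count,
and a value with bit 31 set, i.e. "negative" per the kernel comment above,
means a writer holds the lock, so the trylock fails; a spurious CAS
failure simply retries.

	/* Hypothetical read-side sketch: bump the reader count unless the
	 * writer bit (0x80000000) is set, retrying on spurious CAS failure. */
	static inline int sketch_read_trylock(uint32_t *lock)
	{
		uint32_t old;

		do {
			old = __atomic_load_n(lock, __ATOMIC_RELAXED);
			if (old & 0x80000000)
				return 0;	/* already held for write */
		} while (!__atomic_compare_exchange_n(lock, &old, old + 1,
						      true,	/* weak CAS */
						      __ATOMIC_ACQUIRE,
						      __ATOMIC_RELAXED));
		return 1;	/* read lock acquired */
	}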