locking/core: Remove {read,spin,write}_can_lock()
author	Will Deacon <will.deacon@arm.com>
Tue, 3 Oct 2017 18:25:27 +0000 (19:25 +0100)
committer	Ingo Molnar <mingo@kernel.org>
Tue, 10 Oct 2017 09:50:18 +0000 (11:50 +0200)
Outside of the locking code itself, {read,spin,write}_can_lock() have no
users in tree. Apparmor (the last remaining user of write_can_lock()) got
moved over to lockdep by the previous patch.

This patch removes the use of {read,spin,write}_can_lock() from the
BUILD_LOCK_OPS macro, deferring to the trylock operation for testing
the lock status, and subsequently removes the now-unused macros
altogether. These *_can_lock() operations aren't guaranteed to work in
a concurrent environment and can give incorrect results in the case of
qrwlock.

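For illustration only (not part of this patch; the lock and function
names below are made up): the pattern these macros invited is a racy
check-then-act, and the robust replacements are the trylock operation
or, for "is this lock held?" assertions, lockdep:

  #include <linux/lockdep.h>
  #include <linux/spinlock.h>

  static DEFINE_RWLOCK(example_lock);

  /*
   * Racy pattern being removed:
   *
   *     if (read_can_lock(&example_lock))
   *             read_lock(&example_lock);
   *
   * The answer may already be stale by the time read_lock() runs and,
   * as noted above, may be wrong altogether for qrwlock.  Prefer the
   * trylock operation:
   */
  static void example_reader(void)
  {
          if (!read_trylock(&example_lock))
                  return;
          /* critical section -- the lock is definitely held here */
          read_unlock(&example_lock);
  }

  /*
   * Assertions that a lock is held (what Apparmor used write_can_lock()
   * for) should use lockdep instead:
   *
   *     lockdep_assert_held(&example_lock);
   */
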
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
24 files changed:
arch/alpha/include/asm/spinlock.h
arch/arc/include/asm/spinlock.h
arch/arm/include/asm/spinlock.h
arch/blackfin/include/asm/spinlock.h
arch/hexagon/include/asm/spinlock.h
arch/ia64/include/asm/spinlock.h
arch/m32r/include/asm/spinlock.h
arch/metag/include/asm/spinlock_lnkget.h
arch/metag/include/asm/spinlock_lock1.h
arch/mn10300/include/asm/spinlock.h
arch/parisc/include/asm/spinlock.h
arch/powerpc/include/asm/spinlock.h
arch/s390/include/asm/spinlock.h
arch/sh/include/asm/spinlock-cas.h
arch/sh/include/asm/spinlock-llsc.h
arch/sparc/include/asm/spinlock_32.h
arch/tile/include/asm/spinlock_32.h
arch/tile/include/asm/spinlock_64.h
arch/xtensa/include/asm/spinlock.h
include/asm-generic/qrwlock.h
include/linux/rwlock.h
include/linux/spinlock.h
include/linux/spinlock_up.h
kernel/locking/spinlock.c

index 718ac0b..7bff631 100644
@@ -54,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 /***********************************************************/
 
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
-       return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
-       return lock->lock == 0;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *lock)
 {
        long regx;
index 47efc84..ce9bfcf 100644
@@ -410,9 +410,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 #endif
 
-#define arch_read_can_lock(x)  ((x)->counter > 0)
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 #define arch_read_lock_flags(lock, flags)      arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)     arch_write_lock(lock)
 
index c030143..f522326 100644
@@ -193,9 +193,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        dsb_sev();
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)         (ACCESS_ONCE((x)->lock) == 0)
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -273,9 +270,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        }
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)          (ACCESS_ONCE((x)->lock) < 0x80000000)
-
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
index f643143..607ef98 100644
@@ -48,16 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        __raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-       return __raw_uncached_fetch_asm(&rw->lock) > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-       return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        __raw_read_lock_asm(&rw->lock);
index 53a8d58..9f9414b 100644
@@ -86,16 +86,6 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
        return temp;
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
-       return rwlock->lock == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
-       return rwlock->lock == 0;
-}
-
 /*  Stuffs a -1 in the lock value?  */
 static inline void arch_write_lock(arch_rwlock_t *lock)
 {
index df2c121..c728dda 100644
@@ -127,9 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
        arch_spin_lock(lock);
 }
 
-#define arch_read_can_lock(rw)         (*(volatile int *)(rw) >= 0)
-#define arch_write_can_lock(rw)        (*(volatile int *)(rw) == 0)
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
index a568255..0026013 100644
@@ -137,18 +137,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * semaphore.h for details.  -ben
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
index ad8436f..6a932a9 100644
@@ -136,21 +136,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
                      : "memory");
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-       int ret;
-
-       asm volatile ("LNKGETD  %0, [%1]\n"
-                     "CMP      %0, #0\n"
-                     "MOV      %0, #1\n"
-                     "XORNZ     %0, %0, %0\n"
-                     : "=&d" (ret)
-                     : "da" (&rw->lock)
-                     : "cc");
-       return ret;
-}
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -224,21 +209,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        return tmp;
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-       int tmp;
-
-       asm volatile ("LNKGETD  %0, [%1]\n"
-                     "CMP      %0, %2\n"
-                     "MOV      %0, #1\n"
-                     "XORZ     %0, %0, %0\n"
-                     : "=&d" (tmp)
-                     : "da" (&rw->lock), "bd" (0x80000000)
-                     : "cc");
-       return tmp;
-}
-
 #define        arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define        arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
index c630444..8ae12bf 100644
@@ -104,16 +104,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        rw->lock = 0;
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-       unsigned int ret;
-
-       barrier();
-       ret = rw->lock;
-       return (ret == 0);
-}
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -171,14 +161,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        return (ret < 0x80000000);
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-       unsigned int ret;
-
-       barrier();
-       ret = rw->lock;
-       return (ret < 0x80000000);
-}
-
 #endif /* __ASM_SPINLOCK_LOCK1_H */
index fe413b4..54f75da 100644
@@ -98,18 +98,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
 /*
  * On mn10300, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
index 55bfe4a..136e1c9 100644
@@ -168,24 +168,6 @@ static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
        return result;
 }
 
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
-{
-       return rw->counter >= 0;
-}
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
-{
-       return !rw->counter;
-}
-
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
index edbe571..d83f4f7 100644
@@ -181,9 +181,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * read-locks.
  */
 
-#define arch_read_can_lock(rw)         ((rw)->lock >= 0)
-#define arch_write_can_lock(rw)        (!(rw)->lock)
-
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND       "extsw  %0,%0\n"
 #define WRLOCK_TOKEN           LOCK_TOKEN      /* it's negative */
index 8182b52..dc9c58e 100644
@@ -110,18 +110,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
index 5ed7dbb..3154678 100644
@@ -53,18 +53,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x)  ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned old;
index f77263a..06be4a5 100644
@@ -89,18 +89,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x)  ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
index 67345b2..76986b8 100644
@@ -190,9 +190,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
-#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* __SPARC_SPINLOCK_H */
index cba8ba9..91d05f2 100644
@@ -80,22 +80,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define _RD_COUNT_WIDTH 8
 
 /**
- * arch_read_can_lock() - would read_trylock() succeed?
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
-       return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
-}
-
-/**
- * arch_write_can_lock() - would write_trylock() succeed?
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
-       return rwlock->lock == 0;
-}
-
-/**
  * arch_read_lock() - acquire a read lock.
  */
 void arch_read_lock(arch_rwlock_t *rwlock);
index 9a2c2d6..c802f48 100644
@@ -93,24 +93,6 @@ static inline int arch_write_val_locked(int val)
        return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
 }
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-       return !arch_write_val_locked(rw->lock);
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-       return rw->lock == 0;
-}
-
 extern void __read_lock_failed(arch_rwlock_t *rw);
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
index 3bb4968..d005af5 100644
@@ -97,8 +97,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  *  0x80000000  one writer owns the rwlock, no other writers, no readers
  */
 
-#define arch_write_can_lock(x)  ((x)->lock == 0)
-
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
index 7d026bf..5092532 100644
@@ -53,24 +53,6 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queued_read_can_lock- would read_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_read_can_lock(struct qrwlock *lock)
-{
-       return !(atomic_read(&lock->cnts) & _QW_WMASK);
-}
-
-/**
- * queued_write_can_lock- would write_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_write_can_lock(struct qrwlock *lock)
-{
-       return !atomic_read(&lock->cnts);
-}
-
-/**
  * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
@@ -169,8 +151,6 @@ static inline void queued_write_unlock(struct qrwlock *lock)
  * Remapping rwlock architecture specific functions to the corresponding
  * queue rwlock functions.
  */
-#define arch_read_can_lock(l)  queued_read_can_lock(l)
-#define arch_write_can_lock(l) queued_write_can_lock(l)
 #define arch_read_lock(l)      queued_read_lock(l)
 #define arch_write_lock(l)     queued_write_lock(l)
 #define arch_read_trylock(l)   queued_read_trylock(l)
index bc2994e..766c5ca 100644
@@ -50,9 +50,6 @@ do {                                                          \
 # define do_raw_write_unlock(rwlock)   do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
-#define read_can_lock(rwlock)          arch_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)         arch_write_can_lock(&(rwlock)->raw_lock)
-
 /*
  * Define the various rw_lock methods.  Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
index 69e079c..1e3e480 100644
@@ -278,12 +278,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
        1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-/**
- * raw_spin_can_lock - would raw_spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define raw_spin_can_lock(lock)        (!raw_spin_is_locked(lock))
-
 /* Include rwlock functions */
 #include <linux/rwlock.h>
 
@@ -396,11 +390,6 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
        return raw_spin_is_contended(&lock->rlock);
 }
 
-static __always_inline int spin_can_lock(spinlock_t *lock)
-{
-       return raw_spin_can_lock(&lock->rlock);
-}
-
 #define assert_spin_locked(lock)       assert_raw_spin_locked(&(lock)->rlock)
 
 /*
index 612fb53..901cf8f 100644
@@ -77,7 +77,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #define arch_spin_is_contended(lock)   (((void)(lock), 0))
 
-#define arch_read_can_lock(lock)       (((void)(lock), 1))
-#define arch_write_can_lock(lock)      (((void)(lock), 1))
-
 #endif /* __LINUX_SPINLOCK_UP_H */
index 4b082b5..8fd48b5 100644
@@ -32,8 +32,6 @@
  * include/linux/spinlock_api_smp.h
  */
 #else
-#define raw_read_can_lock(l)   read_can_lock(l)
-#define raw_write_can_lock(l)  write_can_lock(l)
 
 /*
  * Some architectures can relax in favour of the CPU owning the lock.
@@ -68,7 +66,7 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock)                 \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
-               while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+               while ((lock)->break_lock)                              \
                        arch_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
@@ -88,7 +86,7 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)        \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
-               while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+               while ((lock)->break_lock)                              \
                        arch_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \