powerpc/qspinlock: allow lock stealing in trylock and lock fastpath
author     Nicholas Piggin <npiggin@gmail.com>
           Sat, 26 Nov 2022 09:59:27 +0000 (19:59 +1000)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Fri, 2 Dec 2022 06:48:50 +0000 (17:48 +1100)
This change allows trylock to steal the lock. It also allows the initial
lock attempt to steal the lock rather than bailing out and going to the
slow path.

This gives trylock more strength: without it, a continually contended
lock will never permit a trylock to succeed. With this change, the
trylock has a small but non-zero chance of succeeding.

It also gives the lock fastpath most of the benefit of passing the
reservation back through to the steal loop in the slow path, without
that extra complexity.
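
As an illustration of the difference, here is a simplified sketch using
plain C11 atomics (the real code uses hand-written lwarx/stwcx. sequences,
shown in the diff below; the helper names and the tail_mask parameter are
hypothetical, introduced only for this sketch):

/*
 * Illustrative sketch only: generic C11 atomics on a hypothetical 32-bit
 * lock word, not the lwarx/stwcx. loops in the patch. tail_mask covers the
 * bits recording the queue tail; everything outside it (locked bit plus
 * owner CPU) must be clear for a steal to succeed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool sketch_trylock_nosteal(_Atomic uint32_t *val, uint32_t locked)
{
	uint32_t expected = 0;

	/* Succeed only when unlocked and no CPUs are queued (word == 0). */
	return atomic_compare_exchange_strong(val, &expected, locked);
}

static bool sketch_trylock_steal(_Atomic uint32_t *val, uint32_t locked,
				 uint32_t tail_mask)
{
	uint32_t old = atomic_load(val);

	/* Bail out if the lock is currently held... */
	if (old & ~tail_mask)
		return false;

	/* ...otherwise take it, preserving any queue tail already present. */
	return atomic_compare_exchange_strong(val, &old, old | locked);
}

The single compare-and-swap attempt in the stealing variant is what gives
trylock its non-zero chance under continual contention: it no longer
refuses outright just because other CPUs are queued behind the lock.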

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-13-npiggin@gmail.com
arch/powerpc/include/asm/qspinlock.h
arch/powerpc/lib/qspinlock.c

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 9572a2e..93b1c97 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -6,6 +6,15 @@
 #include <asm/qspinlock_types.h>
 #include <asm/paravirt.h>
 
+/*
+ * The trylock itself may steal. This makes trylocks slightly stronger, and
+ * might make spin locks slightly more efficient when stealing.
+ *
+ * This is compile-time, so if true then there may always be stealers, so the
+ * nosteal paths become unused.
+ */
+#define _Q_SPIN_TRY_LOCK_STEAL 1
+
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
        return READ_ONCE(lock->val);
@@ -27,13 +36,14 @@ static __always_inline u32 queued_spin_encode_locked_val(void)
        return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
 }
 
-static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+static __always_inline int __queued_spin_trylock_nosteal(struct qspinlock *lock)
 {
        u32 new = queued_spin_encode_locked_val();
        u32 prev;
 
+       /* Trylock succeeds only when unlocked and no queued nodes */
        asm volatile(
-"1:    lwarx   %0,0,%1,%3      # queued_spin_trylock                   \n"
+"1:    lwarx   %0,0,%1,%3      # __queued_spin_trylock_nosteal         \n"
 "      cmpwi   0,%0,0                                                  \n"
 "      bne-    2f                                                      \n"
 "      stwcx.  %2,0,%1                                                 \n"
@@ -72,6 +82,14 @@ static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
        return likely(!(prev & ~_Q_TAIL_CPU_MASK));
 }
 
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+       if (!_Q_SPIN_TRY_LOCK_STEAL)
+               return __queued_spin_trylock_nosteal(lock);
+       else
+               return __queued_spin_trylock_steal(lock);
+}
+
 void queued_spin_lock_slowpath(struct qspinlock *lock);
 
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
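
For reference, queued_spin_lock() (visible above only as a context line)
is what turns this into a fastpath improvement. In this series it is
assumed to be a single trylock attempt followed by the slowpath, roughly:

/*
 * Sketch of the fastpath shape this patch relies on (simplified; the real
 * function lives in arch/powerpc/include/asm/qspinlock.h).
 */
static __always_inline void sketch_queued_spin_lock(struct qspinlock *lock)
{
	/* With _Q_SPIN_TRY_LOCK_STEAL == 1 this attempt may steal. */
	if (likely(queued_spin_trylock(lock)))
		return;

	queued_spin_lock_slowpath(lock);
}

With stealing enabled at compile time, that first attempt can race past
queued CPUs instead of immediately falling into queued_spin_lock_slowpath().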
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 2f6c0be..8e5b8bc 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -24,7 +24,11 @@ struct qnodes {
 
 /* Tuning parameters */
 static int steal_spins __read_mostly = (1 << 5);
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+static const bool maybe_stealers = true;
+#else
 static bool maybe_stealers __read_mostly = true;
+#endif
 static int head_spins __read_mostly = (1 << 8);
 
 static bool pv_yield_owner __read_mostly = true;
@@ -483,6 +487,10 @@ void pv_spinlocks_init(void)
 #include <linux/debugfs.h>
 static int steal_spins_set(void *data, u64 val)
 {
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+       /* MAYBE_STEAL remains true */
+       steal_spins = val;
+#else
        static DEFINE_MUTEX(lock);
 
        /*
@@ -507,6 +515,7 @@ static int steal_spins_set(void *data, u64 val)
                steal_spins = val;
        }
        mutex_unlock(&lock);
+#endif
 
        return 0;
 }
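
The reason the mutex-protected update can be compiled out entirely is that
maybe_stealers becomes a static const: any branch guarded by it is resolved
at compile time, so the nosteal paths disappear from the generated code. A
minimal, illustrative sketch of the pattern (not kernel code):

/*
 * Illustrative only: with a compile-time constant, the compiler can prove
 * the branch below is never taken and drop the nosteal code entirely,
 * which is what makes the #else branch of steal_spins_set() unnecessary
 * when _Q_SPIN_TRY_LOCK_STEAL == 1.
 */
#include <stdbool.h>

static const bool maybe_stealers_sketch = true;	/* mirrors the new const */

static int choose_path_sketch(void)
{
	if (!maybe_stealers_sketch) {
		/* nosteal path: dead code, eliminated at compile time */
		return 0;
	}
	/* steal-tolerant path: the only code that survives */
	return 1;
}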