locking/percpu-rwsem: Extract __percpu_down_read_trylock()
Author: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 31 Oct 2019 11:34:23 +0000 (12:34 +0100)
Committer: Ingo Molnar <mingo@kernel.org>
Commit Date: Tue, 11 Feb 2020 12:10:55 +0000 (13:10 +0100)
In preparation for removing the embedded rwsem and building a custom
lock, extract the read-trylock primitive.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200131151540.098485539@infradead.org
kernel/locking/percpu-rwsem.c

index becf925..b155e8e 100644 (file)
@@ -45,7 +45,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
        __this_cpu_inc(*sem->read_count);
 
@@ -73,11 +73,18 @@ bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
        if (likely(!smp_load_acquire(&sem->readers_block)))
                return true;
 
-       /*
-        * Per the above comment; we still have preemption disabled and
-        * will thus decrement on the same CPU as we incremented.
-        */
-       __percpu_up_read(sem);
+       __this_cpu_dec(*sem->read_count);
+
+       /* Prod writer to re-evaluate readers_active_check() */
+       rcuwait_wake_up(&sem->writer);
+
+       return false;
+}
+
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+{
+       if (__percpu_down_read_trylock(sem))
+               return true;
 
        if (try)
                return false;