From 04cafed7fc19a8010771c788708ac97c405fc3de Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 12 May 2016 13:57:45 +0200
Subject: [PATCH] locking/rwsem: Fix down_write_killable()

The new signal_pending exit path in __rwsem_down_write_failed_common()
was reported by Tetsuo Handa to break his kernel.

Upon inspection it was found that there are two things wrong with it:

 - it forgets to remove WAITING_BIAS if it leaves the list empty, or

 - it forgets to wake further waiters that were blocked on the now
   removed waiter.

The first issue in particular causes new lock attempts to block and
stall indefinitely, as the code assumes that pending waiters mean there
is an owner that will wake them when it releases the lock.

Reported-by: Tetsuo Handa
Tested-by: Tetsuo Handa
Tested-by: Michal Hocko
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Andrew Morton
Cc: Arnaldo Carvalho de Melo
Cc: Chris Zankel
Cc: David S. Miller
Cc: Davidlohr Bueso
Cc: H. Peter Anvin
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Max Filippov
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Tony Luck
Cc: Vince Weaver
Cc: Waiman Long
Link: http://lkml.kernel.org/r/20160512115745.GP3192@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 kernel/locking/rwsem-xadd.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index df4dcb8..09e30c6 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -487,23 +487,32 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 
 		/* Block until there are no active lockers. */
 		do {
-			if (signal_pending_state(state, current)) {
-				raw_spin_lock_irq(&sem->wait_lock);
-				ret = ERR_PTR(-EINTR);
-				goto out;
-			}
+			if (signal_pending_state(state, current))
+				goto out_nolock;
+
 			schedule();
 			set_current_state(state);
 		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
-out:
 	__set_current_state(TASK_RUNNING);
 	list_del(&waiter.list);
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	return ret;
+
+out_nolock:
+	__set_current_state(TASK_RUNNING);
+	raw_spin_lock_irq(&sem->wait_lock);
+	list_del(&waiter.list);
+	if (list_empty(&sem->wait_list))
+		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
+	else
+		__rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+	raw_spin_unlock_irq(&sem->wait_lock);
+
+	return ERR_PTR(-EINTR);
 }
 
 __visible struct rw_semaphore * __sched
-- 
2.7.4
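
As background to the stall described in the changelog, here is a small
user-space sketch (illustration only, not kernel code: the bias value is
made up and the "wake further waiters" half of the fix is not modelled)
of what happens when an aborting writer leaves WAITING_BIAS behind on an
empty wait list:

/* Toy model of the accounting this patch fixes -- not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define WAITING_BIAS	(-100)		/* stand-in value, not the real bias */

static long count;			/* models sem->count */
static int  nr_waiters;			/* models the length of sem->wait_list */

/* A queued writer gives up after a signal. */
static void abort_waiter(bool fixed)
{
	nr_waiters--;
	if (fixed && nr_waiters == 0)
		count -= WAITING_BIAS;	/* rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem) */
}

int main(void)
{
	for (int pass = 0; pass < 2; pass++) {
		bool fixed = pass;

		count = 0;
		nr_waiters = 0;

		/* A killable writer queues itself... */
		count += WAITING_BIAS;
		nr_waiters++;

		/* ...and then bails out on SIGKILL. */
		abort_waiter(fixed);

		/*
		 * A later down_write() sees a non-zero count, assumes an
		 * owner exists that will wake it, and sleeps forever.
		 */
		printf("%s: count=%ld, next writer %s\n",
		       fixed ? "fixed " : "broken",
		       count,
		       count == 0 ? "acquires immediately" : "parks and stalls");
	}
	return 0;
}

In the broken case the leftover bias makes the next down_write() take the
slowpath and park behind a waiter that no longer exists; with the fix the
count returns to zero and the writer can acquire the lock.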