// SPDX-License-Identifier: GPL-2.0
/* kernel/locking/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30 #include <trace/events/lock.h>
32 #ifndef CONFIG_PREEMPT_RT
33 #include "lock_events.h"
/*
 * The least significant 2 bits of the owner value have the following
 * meanings:
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
 * the nonspinnable bit will be set to disable optimistic spinning.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Fast-path reader optimistic lock stealing is supported when the rwsem
 * is previously owned by a writer and the following conditions are met:
 *  - rwsem is not currently writer owned
 *  - the handoff isn't set.
 */
63 #define RWSEM_READER_OWNED (1UL << 0)
64 #define RWSEM_NONSPINNABLE (1UL << 1)
65 #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
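/*
 * Illustrative helper, not part of the upstream file and not used elsewhere
 * here: it only documents the owner-word layout. An owner value is a
 * task_struct pointer with the two flag bits above folded into its low-order
 * (alignment) bits, which is exactly what rwsem_owner_flags() below decodes.
 */
static inline struct task_struct *
rwsem_owner_example_split(unsigned long owner, unsigned long *flags)
{
	*flags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}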
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers		-- set, clear
 * 2) rwsem_try_write_lock() for writers	-- set, clear
 * 3) rwsem_del_waiter()			-- clear
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of handoff bit is not possible.
 */
117 #define RWSEM_WRITER_LOCKED (1UL << 0)
118 #define RWSEM_FLAG_WAITERS (1UL << 1)
119 #define RWSEM_FLAG_HANDOFF (1UL << 2)
120 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
122 #define RWSEM_READER_SHIFT 8
123 #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
124 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
125 #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
126 #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
127 #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
128 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
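/*
 * Illustrative helpers, not part of the upstream file and not used elsewhere
 * here: they decode a count snapshot using the definitions above. For
 * example, a count of (3 << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS
 * describes a lock held by three readers with at least one waiter queued.
 */
static inline long rwsem_example_reader_count(long count)
{
	return count >> RWSEM_READER_SHIFT;
}

static inline bool rwsem_example_writer_locked(long count)
{
	return !!(count & RWSEM_WRITER_MASK);
}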
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 *
 * Both rwsem_{set,clear}_owner() functions should be in the same
 * preempt disable section as the atomic op that changes sem->count.
 */
140 static inline void rwsem_set_owner(struct rw_semaphore *sem)
142 lockdep_assert_preemption_disabled();
143 atomic_long_set(&sem->owner, (long)current);
146 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
148 lockdep_assert_preemption_disabled();
149 atomic_long_set(&sem->owner, 0);
153 * Test the flags in the owner field.
155 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
157 return atomic_long_read(&sem->owner) & flags;
/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
170 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
171 struct task_struct *owner)
173 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
174 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
176 atomic_long_set(&sem->owner, val);
179 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
181 __rwsem_set_reader_owned(sem, current);
/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}
#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in owner of a reader-owned rwsem, it will be the
 * real owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}
static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	if (WARN_ON_ONCE(*cntp < 0))
		rwsem_set_nonspinnable(sem);

	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
		rwsem_set_reader_owned(sem);
		return true;
	}

	return false;
}
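/*
 * Illustrative note (editorial, not upstream text): rwsem_read_trylock()
 * above is the reader fast path. On an uncontended rwsem the unconditional
 * fetch_add leaves the count at RWSEM_READER_BIAS (0x100) and the helper
 * returns true. If any of the writer, waiter, handoff or read-fail bits
 * were set in the result, the bias is left in place and the caller falls
 * back to the slowpath, which decides whether to back the bias out again.
 */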
256 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
258 long tmp = RWSEM_UNLOCKED_VALUE;
262 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
263 rwsem_set_owner(sem);
272 * Return just the real task structure pointer of the owner
274 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
276 return (struct task_struct *)
277 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
281 * Return the real task structure pointer of the owner and the embedded
282 * flags in the owner. pflags must be non-NULL.
284 static inline struct task_struct *
285 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
287 unsigned long owner = atomic_long_read(&sem->owner);
289 *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
290 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
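/*
 * Illustrative usage (hypothetical caller, not part of this file): rwsems
 * are normally set up through the wrappers in <linux/rwsem.h> rather than
 * by calling __init_rwsem() directly, e.g.
 *
 *	static DECLARE_RWSEM(example_rwsem);	// compile-time initialization
 *
 *	struct example { struct rw_semaphore lock; };
 *	init_rwsem(&ex->lock);			// run-time initialization
 */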
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
	bool handoff_set;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};
/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
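/*
 * Illustrative arithmetic for the common HZ choices: with HZ=1000 the
 * timeout is DIV_ROUND_UP(1000, 250) = 4 jiffies (4ms); with HZ=250 it is
 * 1 jiffy (4ms); with HZ=100 it is 1 jiffy (10ms). A waiter therefore sits
 * in the queue for at least roughly 4ms before it may request a handoff.
 */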
/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100
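/*
 * Illustrative arithmetic: 0x100 = 256 readers per rwsem_mark_wake() call,
 * so a single wakeup adds at most 256 * RWSEM_READER_BIAS = 0x10000 to the
 * count, which is nowhere near the 55-bit (23-bit on 32-bit kernels) reader
 * field and therefore cannot push it into the read-fail bit.
 */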
static inline void
rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_add_tail(&waiter->list, &sem->wait_list);
	/* caller will set RWSEM_FLAG_WAITERS */
}
/*
 * Remove a waiter from the wait_list and clear flags.
 *
 * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
 * this function. Modify with care.
 *
 * Return: true if wait_list isn't empty and false otherwise
 */
static inline bool
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_del(&waiter->list);
	if (likely(!list_empty(&sem->wait_list)))
		return true;

	atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
	return false;
}
/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   be set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 *
 * Implies rwsem_del_waiter() for all woken readers.
 */
414 static void rwsem_mark_wake(struct rw_semaphore *sem,
415 enum rwsem_wake_type wake_type,
416 struct wake_q_head *wake_q)
418 struct rwsem_waiter *waiter, *tmp;
419 long oldcount, woken = 0, adjustment = 0;
420 struct list_head wlist;
422 lockdep_assert_held(&sem->wait_lock);
425 * Take a peek at the queue head waiter such that we can determine
426 * the wakeup(s) to perform.
428 waiter = rwsem_first_waiter(sem);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;
453 * Writers might steal the lock before we grant it to the next reader.
454 * We prefer to do the first reader grant before counting readers
455 * so we can bail out early if a writer stole the lock.
457 if (wake_type != RWSEM_WAKE_READ_OWNED) {
458 struct task_struct *owner;
460 adjustment = RWSEM_READER_BIAS;
461 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
462 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
464 * When we've been waiting "too" long (for writers
465 * to give up the lock), request a HANDOFF to
468 if (time_after(jiffies, waiter->timeout)) {
469 if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
470 adjustment -= RWSEM_FLAG_HANDOFF;
471 lockevent_inc(rwsem_rlock_handoff);
				}
				waiter->handoff_set = true;
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
480 * Set it to reader-owned to give spinners an early
481 * indication that readers now have the lock.
482 * The reader nonspinnable bit seen at slowpath entry of
483 * the reader is copied over.
485 owner = waiter->task;
486 __rwsem_set_reader_owned(sem, owner);
490 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
491 * queue. We know that the woken will be at least 1 as we accounted
492 * for above. Note we increment the 'active part' of the count by the
493 * number of readers before waking any processes up.
495 * This is an adaptation of the phase-fair R/W locks where at the
496 * reader phase (first waiter is a reader), all readers are eligible
497 * to acquire the lock at the same time irrespective of their order
498 * in the queue. The writers acquire the lock according to their
499 * order in the queue.
501 * We have to do wakeup in 2 passes to prevent the possibility that
502 * the reader count may be decremented before it is incremented. It
503 * is because the to-be-woken waiter may not have slept yet. So it
504 * may see waiter->task got cleared, finish its critical section and
505 * do an unlock before the reader count increment.
507 * 1) Collect the read-waiters in a separate list, count them and
508 * fully increment the reader count in rwsem.
 * 2) For each waiter in the new list, clear waiter->task and
510 * put them into wake_q to be woken up later.
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (unlikely(woken >= MAX_READERS_WAKEUP))
			break;
	}
527 adjustment = woken * RWSEM_READER_BIAS - adjustment;
528 lockevent_cond_inc(rwsem_wake_reader, woken);
	oldcount = atomic_long_read(&sem->count);
	if (list_empty(&sem->wait_list)) {
		/*
		 * Combined with list_move_tail() above, this implies
		 * rwsem_del_waiter().
		 */
		adjustment -= RWSEM_FLAG_WAITERS;
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	} else if (woken) {
		/*
		 * When we've woken a reader, we no longer need to force
		 * writers to give up the lock and we can clear HANDOFF.
		 */
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
552 list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);
559 * Ensure calling get_task_struct() before setting the reader
560 * waiter to nil such that rwsem_down_read_slowpath() cannot
561 * race with do_exit() by always holding a reference count
562 * to the task to wakeup.
564 smp_store_release(&waiter->task, NULL);
566 * Ensure issuing the wakeup (either by us or someone else)
567 * after setting the reader waiter to nil.
569 wake_q_add_safe(wake_q, tsk);
574 * Remove a waiter and try to wake up other waiters in the wait queue
575 * This function is called from the out_nolock path of both the reader and
576 * writer slowpaths with wait_lock held. It releases the wait_lock and
577 * optionally wake up waiters before it returns.
580 rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
581 struct wake_q_head *wake_q)
582 __releases(&sem->wait_lock)
{
	bool first = rwsem_first_waiter(sem) == waiter;

	wake_q_init(wake_q);
589 * If the wait_list isn't empty and the waiter to be deleted is
590 * the first waiter, we wake up the remaining waiters as they may
591 * be eligible to acquire or spin on the lock.
593 if (rwsem_del_waiter(sem, waiter) && first)
594 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
595 raw_spin_unlock_irq(&sem->wait_lock);
	if (!wake_q_empty(wake_q))
		wake_up_q(wake_q);
}
601 * This function must be called with the sem->wait_lock held to prevent
602 * race conditions between checking the rwsem wait list and setting the
603 * sem->count accordingly.
605 * Implies rwsem_del_waiter() on success.
607 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
608 struct rwsem_waiter *waiter)
610 struct rwsem_waiter *first = rwsem_first_waiter(sem);
613 lockdep_assert_held(&sem->wait_lock);
615 count = atomic_long_read(&sem->count);
617 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
621 * Honor handoff bit and yield only when the first
			 * waiter is the one that set it. Otherwise, we
623 * still try to acquire the rwsem.
625 if (first->handoff_set && (waiter != first))
631 if (count & RWSEM_LOCK_MASK) {
			/*
			 * A waiter (first or not) can set the handoff bit
			 * if it is an RT task or has waited in the wait
			 * queue for more than RWSEM_WAIT_TIMEOUT.
			 */
			if (has_handoff || (!rt_task(waiter->task) &&
					    !time_after(jiffies, waiter->timeout)))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
643 new |= RWSEM_WRITER_LOCKED;
644 new &= ~RWSEM_FLAG_HANDOFF;
646 if (list_is_singular(&sem->wait_list))
647 new &= ~RWSEM_FLAG_WAITERS;
649 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
652 * We have either acquired the lock with handoff bit cleared or set
653 * the handoff bit. Only the first waiter can have its handoff_set
654 * set here to enable optimistic spinning in slowpath loop.
656 if (new & RWSEM_FLAG_HANDOFF) {
657 first->handoff_set = true;
658 lockevent_inc(rwsem_wlock_handoff);
	 * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
	 * success.
	 */
666 list_del(&waiter->list);
667 rwsem_set_owner(sem);
672 * The rwsem_spin_on_owner() function returns the following 4 values
673 * depending on the lock owner state.
674 * OWNER_NULL : owner is currently NULL
675 * OWNER_WRITER: when owner changes and is a writer
676 * OWNER_READER: when owner changes and the new owner may be a reader.
 * OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
689 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
691 * Try to acquire write lock before the writer has been put on wait queue.
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_lock);
			return true;
		}
	}

	return false;
}
708 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
710 struct task_struct *owner;
714 if (need_resched()) {
715 lockevent_inc(rwsem_opt_fail);
	preempt_disable();
	/*
	 * Disabling preemption is equivalent to an RCU read-side critical
	 * section, thus the task_struct structure won't go away.
	 */
724 owner = rwsem_owner_flags(sem, &flags);
726 * Don't check the read-owner as the entry may be stale.
728 if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}
737 #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
739 static inline enum owner_state
740 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
742 if (flags & RWSEM_NONSPINNABLE)
743 return OWNER_NONSPINNABLE;
745 if (flags & RWSEM_READER_OWNED)
748 return owner ? OWNER_WRITER : OWNER_NULL;
751 static noinline enum owner_state
752 rwsem_spin_on_owner(struct rw_semaphore *sem)
754 struct task_struct *new, *owner;
755 unsigned long flags, new_flags;
756 enum owner_state state;
758 lockdep_assert_preemption_disabled();
760 owner = rwsem_owner_flags(sem, &flags);
761 state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;

	for (;;) {
		/*
767 * When a waiting writer set the handoff flag, it may spin
768 * on the owner as well. Once that writer acquires the lock,
769 * we can spin on it. So we don't need to quit even when the
770 * handoff bit is set.
		 */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory. If it still matches,
		 * our spinning context already disabled preemption, which is
		 * equal to an RCU read-side critical section and ensures the
		 * memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}

	return state;
}
/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
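/*
 * Illustrative arithmetic for the formula above: with 10 readers the
 * spinning budget is (20 + 10) / 2 = 15us; with 30 or more readers it is
 * capped at (20 + 30) / 2 = 25us; a single reader gets roughly 10us.
 */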
824 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
827 int prev_owner_state = OWNER_NULL;
829 u64 rspin_threshold = 0;
833 /* sem->wait_lock should not be held when doing optimistic spinning */
834 if (!osq_lock(&sem->osq))
838 * Optimistically spin on the owner field and attempt to acquire the
839 * lock whenever the owner changes. Spinning will be stopped when:
840 * 1) the owning writer isn't running; or
841 * 2) readers own the lock and spinning time has exceeded limit.
844 enum owner_state owner_state;
846 owner_state = rwsem_spin_on_owner(sem);
847 if (!(owner_state & OWNER_SPINNABLE))
851 * Try to acquire the lock
853 taken = rwsem_try_write_lock_unqueued(sem);
859 * Time-based reader-owned rwsem optimistic spinning
861 if (owner_state == OWNER_READER) {
863 * Re-initialize rspin_threshold every time when
864 * the owner state changes from non-reader to reader.
865 * This allows a writer to steal the lock in between
866 * 2 reader phases and have the threshold reset at
867 * the beginning of the 2nd reader phase.
869 if (prev_owner_state != OWNER_READER) {
870 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
872 rspin_threshold = rwsem_rspin_threshold(sem);
877 * Check time threshold once every 16 iterations to
878 * avoid calling sched_clock() too frequently so
879 * as to reduce the average latency between the times
880 * when the lock becomes free and when the spinner
881 * is ready to do a trylock.
883 else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
884 rwsem_set_nonspinnable(sem);
885 lockevent_inc(rwsem_opt_nospin);
891 * An RT task cannot do optimistic spinning if it cannot
892 * be sure the lock holder is running or live-lock may
893 * happen if the current task and the lock holder happen
894 * to run in the same CPU. However, aborting optimistic
895 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * a problem.
899 * There are 2 possible cases where an RT task may be able
900 * to continue spinning.
902 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
905 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
907 * we try to get it. The new owner may be a spinnable
910 * To take advantage of two scenarios listed above, the RT
911 * task is made to retry one more time to see if it can
912 * acquire the lock or continue spinning on the new owning
913 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or spinnable, the RT task will
		 * quit spinning.
917 * If the owner is a writer, the need_resched() check is
918 * done inside rwsem_spin_on_owner(). If the owner is not
919 * a writer, need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}

		prev_owner_state = owner_state;
931 * The cpu_relax() call is a compiler barrier which forces
932 * everything in this loop to be re-loaded. We don't need
933 * memory barriers as we'll eventually observe the right
934 * values at the cost of a few extra spins.
938 osq_unlock(&sem->osq);
941 lockevent_cond_inc(rwsem_opt_fail, !taken);
946 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
947 * only be called when the reader count reaches 0.
949 static inline void clear_nonspinnable(struct rw_semaphore *sem)
951 if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
952 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}

#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline void clear_nonspinnable(struct rw_semaphore *sem) { }

static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	return OWNER_NONSPINNABLE;
}
#endif
976 * Prepare to wake up waiter(s) in the wait queue by putting them into the
977 * given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
978 * reader-owned, wake up read lock waiters in queue front or wake up any
979 * front waiter otherwise.
981 * This is being called from both reader and writer slow paths.
983 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
984 struct wake_q_head *wake_q)
986 enum rwsem_wake_type wake_type;
	if (count & RWSEM_WRITER_MASK)
		return;

	if (count & RWSEM_READER_MASK) {
		wake_type = RWSEM_WAKE_READERS;
	} else {
		wake_type = RWSEM_WAKE_ANY;
		clear_nonspinnable(sem);
	}
	rwsem_mark_wake(sem, wake_type, wake_q);
}
1001 * Wait for the read lock to be granted
1003 static struct rw_semaphore __sched *
1004 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
1006 long adjustment = -RWSEM_READER_BIAS;
1007 long rcnt = (count >> RWSEM_READER_SHIFT);
1008 struct rwsem_waiter waiter;
1009 DEFINE_WAKE_Q(wake_q);
1012 * To prevent a constant stream of readers from starving a sleeping
1013 * waiter, don't attempt optimistic lock stealing if the lock is
1014 * currently owned by readers.
	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
		goto queue;
1021 * Reader optimistic lock stealing.
1023 if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
1024 rwsem_set_reader_owned(sem);
1025 lockevent_inc(rwsem_rlock_steal);
		/*
		 * Wake up other readers in the wait queue if it is
		 * the first reader.
		 */
		if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
1032 raw_spin_lock_irq(&sem->wait_lock);
1033 if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	}

queue:
	waiter.task = current;
1044 waiter.type = RWSEM_WAITING_FOR_READ;
1045 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1046 waiter.handoff_set = false;
1048 raw_spin_lock_irq(&sem->wait_lock);
1049 if (list_empty(&sem->wait_list)) {
1051 * In case the wait queue is empty and the lock isn't owned
1052 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_READER_BIAS has already been set
		 * in the count.
		 */
		if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
1057 /* Provide lock ACQUIRE */
1058 smp_acquire__after_ctrl_dep();
1059 raw_spin_unlock_irq(&sem->wait_lock);
1060 rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
1066 rwsem_add_waiter(sem, &waiter);
1068 /* we're now waiting on the lock, but no longer actively locking */
1069 count = atomic_long_add_return(adjustment, &sem->count);
1071 rwsem_cond_wake_waiter(sem, count, &wake_q);
1072 raw_spin_unlock_irq(&sem->wait_lock);
1074 if (!wake_q_empty(&wake_q))
1077 trace_contention_begin(sem, LCB_F_READ);
	/* wait to be given the lock */
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule_preempt_disabled();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	trace_contention_end(sem, 0);
	return sem;

out_nolock:
	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1105 __set_current_state(TASK_RUNNING);
1106 lockevent_inc(rwsem_rlock_fail);
1107 trace_contention_end(sem, -EINTR);
1108 return ERR_PTR(-EINTR);
1112 * Wait until we successfully acquire the write lock
1114 static struct rw_semaphore __sched *
1115 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1117 struct rwsem_waiter waiter;
1118 DEFINE_WAKE_Q(wake_q);
	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		return sem;
	}
1127 * Optimistic spinning failed, proceed to the slowpath
1128 * and block until we can acquire the sem.
1130 waiter.task = current;
1131 waiter.type = RWSEM_WAITING_FOR_WRITE;
1132 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1133 waiter.handoff_set = false;
1135 raw_spin_lock_irq(&sem->wait_lock);
1136 rwsem_add_waiter(sem, &waiter);
1138 /* we're now waiting on the lock */
1139 if (rwsem_first_waiter(sem) != &waiter) {
		rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
				       &wake_q);
1142 if (!wake_q_empty(&wake_q)) {
1144 * We want to minimize wait_lock hold time especially
1145 * when a large number of readers are to be woken up.
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}
1155 /* wait until we successfully acquire the lock */
1156 set_current_state(state);
	trace_contention_begin(sem, LCB_F_WRITE);

	for (;;) {
		if (rwsem_try_write_lock(sem, &waiter)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		if (signal_pending_state(state, current))
			goto out_nolock;

		/*
		 * After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-cpu writer and it
		 * has just released the lock, OWNER_NULL will be returned.
		 * In this case, we attempt to acquire the lock again
		 * without sleeping.
		 */
		if (waiter.handoff_set) {
			enum owner_state owner_state;

			preempt_disable();
			owner_state = rwsem_spin_on_owner(sem);
			preempt_enable();

			if (owner_state == OWNER_NULL)
				goto trylock_again;
		}

		schedule();
		lockevent_inc(rwsem_sleep_writer);
		set_current_state(state);
trylock_again:
		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);
	trace_contention_end(sem, 0);
	return sem;

out_nolock:
1202 __set_current_state(TASK_RUNNING);
1203 raw_spin_lock_irq(&sem->wait_lock);
1204 rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1205 lockevent_inc(rwsem_wlock_fail);
1206 trace_contention_end(sem, -EINTR);
	return ERR_PTR(-EINTR);
}
1211 * handle waking up a waiter on the semaphore
1212 * - up_read/up_write has decremented the active part of count if we come here
1214 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1216 unsigned long flags;
1217 DEFINE_WAKE_Q(wake_q);
1219 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1221 if (!list_empty(&sem->wait_list))
1222 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
1231 * downgrade a write lock into a read lock
1232 * - caller incremented waiting part of count and discovered it still negative
1233 * - just wake up any readers at the front of the queue
1235 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1237 unsigned long flags;
1238 DEFINE_WAKE_Q(wake_q);
1240 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1242 if (!list_empty(&sem->wait_list))
1243 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
{
	int ret = 0;
	long count;

	preempt_disable();
	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
			ret = -EINTR;
			goto out;
		}
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
out:
	preempt_enable();
	return ret;
}
1272 static __always_inline void __down_read(struct rw_semaphore *sem)
1274 __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1277 static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
1279 return __down_read_common(sem, TASK_INTERRUPTIBLE);
1282 static __always_inline int __down_read_killable(struct rw_semaphore *sem)
1284 return __down_read_common(sem, TASK_KILLABLE);
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int ret = 0;
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	preempt_disable();
	tmp = atomic_long_read(&sem->count);
	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						    tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			ret = 1;
			break;
		}
	}
	preempt_enable();
	return ret;
}
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
	if (unlikely(!rwsem_write_trylock(sem))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
			return -EINTR;
	}

	return 0;
}
1321 static inline void __down_write(struct rw_semaphore *sem)
1323 __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1326 static inline int __down_write_killable(struct rw_semaphore *sem)
1328 return __down_write_common(sem, TASK_KILLABLE);
1331 static inline int __down_write_trylock(struct rw_semaphore *sem)
1333 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1334 return rwsem_write_trylock(sem);
1338 * unlock after reading
1340 static inline void __up_read(struct rw_semaphore *sem)
1344 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1345 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	preempt_disable();
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_nonspinnable(sem);
		rwsem_wake(sem);
	}
	preempt_enable();
}
1360 * unlock after writing
1362 static inline void __up_write(struct rw_semaphore *sem)
1366 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1368 * sem->owner may differ from current if the ownership is transferred
1369 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
1371 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1372 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
	preempt_disable();
	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
	preempt_enable();
}
1383 * downgrade write lock to read lock
1385 static inline void __downgrade_write(struct rw_semaphore *sem)
1390 * When downgrading from exclusive to shared ownership,
1391 * anything inside the write-locked region cannot leak
1392 * into the read side. In contrast, anything in the
1393 * read-locked region is ok to be re-ordered into the
1394 * write side. As such, rely on RELEASE semantics.
1396 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1397 tmp = atomic_long_fetch_add_release(
1398 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1399 rwsem_set_reader_owned(sem);
1400 if (tmp & RWSEM_FLAG_WAITERS)
1401 rwsem_downgrade_wake(sem);
1404 #else /* !CONFIG_PREEMPT_RT */
1406 #define RT_MUTEX_BUILD_MUTEX
1407 #include "rtmutex.c"
1409 #define rwbase_set_and_save_current_state(state) \
1410 set_current_state(state)
1412 #define rwbase_restore_current_state() \
1413 __set_current_state(TASK_RUNNING)
1415 #define rwbase_rtmutex_lock_state(rtm, state) \
1416 __rt_mutex_lock(rtm, state)
1418 #define rwbase_rtmutex_slowlock_locked(rtm, state) \
1419 __rt_mutex_slowlock_locked(rtm, NULL, state)
1421 #define rwbase_rtmutex_unlock(rtm) \
1422 __rt_mutex_unlock(rtm)
1424 #define rwbase_rtmutex_trylock(rtm) \
1425 __rt_mutex_trylock(rtm)
1427 #define rwbase_signal_pending_state(state, current) \
1428 signal_pending_state(state, current)
#define rwbase_schedule()				\
	schedule()
1433 #include "rwbase_rt.c"
1435 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1436 struct lock_class_key *key)
1438 init_rwbase_rt(&(sem)->rwbase);
1440 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1441 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1442 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1445 EXPORT_SYMBOL(__init_rwsem);
1447 static inline void __down_read(struct rw_semaphore *sem)
1449 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1452 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1454 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1457 static inline int __down_read_killable(struct rw_semaphore *sem)
1459 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1462 static inline int __down_read_trylock(struct rw_semaphore *sem)
1464 return rwbase_read_trylock(&sem->rwbase);
1467 static inline void __up_read(struct rw_semaphore *sem)
1469 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1472 static inline void __sched __down_write(struct rw_semaphore *sem)
1474 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1477 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1479 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1482 static inline int __down_write_trylock(struct rw_semaphore *sem)
1484 return rwbase_write_trylock(&sem->rwbase);
1487 static inline void __up_write(struct rw_semaphore *sem)
1489 rwbase_write_unlock(&sem->rwbase);
1492 static inline void __downgrade_write(struct rw_semaphore *sem)
1494 rwbase_write_downgrade(&sem->rwbase);
1497 /* Debug stubs for the common API */
1498 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1500 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1501 struct task_struct *owner)
1505 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1507 int count = atomic_read(&sem->rwbase.readers);
1509 return count < 0 && count != READER_BIAS;
1512 #endif /* CONFIG_PREEMPT_RT */
1517 void __sched down_read(struct rw_semaphore *sem)
1520 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1522 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1524 EXPORT_SYMBOL(down_read);
1526 int __sched down_read_interruptible(struct rw_semaphore *sem)
1529 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1531 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1532 rwsem_release(&sem->dep_map, _RET_IP_);
1538 EXPORT_SYMBOL(down_read_interruptible);
1540 int __sched down_read_killable(struct rw_semaphore *sem)
1543 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1545 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1546 rwsem_release(&sem->dep_map, _RET_IP_);
1552 EXPORT_SYMBOL(down_read_killable);
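/*
 * Illustrative usage (hypothetical caller, not part of this file): the
 * interruptible/killable variants return 0 on success and -EINTR when the
 * wait was interrupted, in which case the lock is NOT held:
 *
 *	if (down_read_killable(&sem))
 *		return -EINTR;
 *	... read-side critical section ...
 *	up_read(&sem);
 */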
1555 * trylock for reading -- returns 1 if successful, 0 if contention
1557 int down_read_trylock(struct rw_semaphore *sem)
1559 int ret = __down_read_trylock(sem);
1562 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1565 EXPORT_SYMBOL(down_read_trylock);
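/*
 * Illustrative usage (hypothetical caller, not part of this file): the
 * trylocks return 1 on success and 0 on contention:
 *
 *	if (down_read_trylock(&sem)) {
 *		... read-side critical section ...
 *		up_read(&sem);
 *	} else {
 *		... contended: defer or fall back ...
 *	}
 */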
1570 void __sched down_write(struct rw_semaphore *sem)
1573 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1574 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1576 EXPORT_SYMBOL(down_write);
1581 int __sched down_write_killable(struct rw_semaphore *sem)
1584 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1586 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1587 __down_write_killable)) {
1588 rwsem_release(&sem->dep_map, _RET_IP_);
1594 EXPORT_SYMBOL(down_write_killable);
1597 * trylock for writing -- returns 1 if successful, 0 if contention
1599 int down_write_trylock(struct rw_semaphore *sem)
1601 int ret = __down_write_trylock(sem);
1604 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1608 EXPORT_SYMBOL(down_write_trylock);
1611 * release a read lock
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
1618 EXPORT_SYMBOL(up_read);
1621 * release a write lock
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_write(sem);
}
1628 EXPORT_SYMBOL(up_write);
1631 * downgrade write lock to read lock
1633 void downgrade_write(struct rw_semaphore *sem)
1635 lock_downgrade(&sem->dep_map, _RET_IP_);
1636 __downgrade_write(sem);
1638 EXPORT_SYMBOL(downgrade_write);
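/*
 * Illustrative usage (hypothetical caller, not part of this file): a typical
 * downgrade pattern builds a data structure exclusively, then publishes it
 * while continuing to keep writers out:
 *
 *	down_write(&sem);
 *	... modify ...
 *	downgrade_write(&sem);
 *	... read-only section, concurrent readers may enter ...
 *	up_read(&sem);		// not up_write()
 */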
1640 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1642 void down_read_nested(struct rw_semaphore *sem, int subclass)
1645 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1646 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1648 EXPORT_SYMBOL(down_read_nested);
1650 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1653 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1655 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1656 rwsem_release(&sem->dep_map, _RET_IP_);
1662 EXPORT_SYMBOL(down_read_killable_nested);
1664 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1667 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1668 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1670 EXPORT_SYMBOL(_down_write_nest_lock);
void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	/*
	 * The owner value for a reader-owned lock is mostly for debugging
1678 * purpose only and is not critical to the correct functioning of
1679 * rwsem. So it is perfectly fine to set it in a preempt-enabled
1682 __rwsem_set_reader_owned(sem, NULL);
1684 EXPORT_SYMBOL(down_read_non_owner);
1686 void down_write_nested(struct rw_semaphore *sem, int subclass)
1689 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1690 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1692 EXPORT_SYMBOL(down_write_nested);
1694 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1697 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1699 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1700 __down_write_killable)) {
1701 rwsem_release(&sem->dep_map, _RET_IP_);
1707 EXPORT_SYMBOL(down_write_killable_nested);
void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
1714 EXPORT_SYMBOL(up_read_non_owner);