/*
 * The ldisc semaphore is semantically a rw_semaphore, but one which
 * enforces an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */
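
/*
 * Example usage (an illustrative sketch only; the 5 sec timeout and the
 * error handling are assumptions modeled on this lock's tty callers,
 * not part of this file):
 *
 *	if (!ldsem_down_read(&tty->ldisc_sem, 5 * HZ))
 *		return -EBUSY;	(lock wait timed out)
 *	... use the line discipline ...
 *	ldsem_up_read(&tty->ldisc_sem);
 */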
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
# ifdef CONFIG_PROVE_LOCKING
#  define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 2, NULL, i)
#  define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 2, n, i)
#  define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 2, NULL, i)
#  define lockdep_release(l, n, i)		__rel(l, n, i)
# else
#  define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
#  define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
#  define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
#  define lockdep_release(l, n, i)		__rel(l, n, i)
# endif
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif
#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif
#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif
#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
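
/*
 * Worked example of the count encoding on a 64-bit build (active count
 * in the low 32 bits, wait count in the high 32 bits); the values below
 * are derived from the biases above and shown for illustration only:
 *
 *	unlocked:			0x0000000000000000
 *	one active reader:		0x0000000000000001  (+LDSEM_READ_BIAS)
 *	write lock held:		0xffffffff00000001  (LDSEM_WRITE_BIAS)
 *	write held, 1 reader waiting:	0xfffffffe00000001  (+LDSEM_WAIT_BIAS)
 *
 * Each blocked waiter contributes one LDSEM_WAIT_BIAS to the upper half.
 */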
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};
static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
/*
 * Try to change the count from *old to new.  Returns 1 on success, with
 * *old holding the new count; returns 0 on failure, with *old updated
 * to the current count so the caller can retry.
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long expected = *old;

	*old = atomic_long_cmpxchg(&sem->count, *old, new);
	if (*old != expected)
		return 0;
	*old = new;
	return 1;
}
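
/*
 * Sketch of the retry idiom the lock paths below build on (a hypothetical
 * caller; 'bias' stands in for one of the LDSEM_*_BIAS values and
 * lock_may_be_taken() is a placeholder for the per-path admission test):
 *
 *	long count = sem->count;
 *	while (lock_may_be_taken(count)) {
 *		if (ldsem_cmpxchg(&count, count + bias, sem))
 *			return 1;	won the race; count is the new value
 *	}				lost the race; count was reloaded
 *	return 0;
 */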
/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}
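
/*
 * Note: callers are expected to initialize through the init_ldsem()
 * wrapper in linux/tty.h, which supplies the lock name and a static
 * lock_class_key; the assumed usage is:
 *
 *	struct ld_semaphore sem;
 *	init_ldsem(&sem);
 */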
static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		/* the grant raced with a writer; try to back it out */
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_mb();
		/* clearing ->task tells the waiter the lock was granted */
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}
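
/*
 * For illustration: with two blocked readers on a 64-bit build,
 *
 *	adjust = 2 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
 *	       = 2 * (1 + 0x100000000)
 *
 * so a single atomic update converts both waiters' wait-count
 * contributions into two granted read locks before any wakeup.
 */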
static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		/* the claim failed; try to back out the active bias */
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}
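
/*
 * Illustration of the 0 -> 1 transition: if only waiters are accounted
 * (e.g. count == n * LDSEM_WAIT_BIAS), adding LDSEM_ACTIVE_BIAS leaves
 * an active part of exactly LDSEM_ACTIVE_BIAS and the writer wins; any
 * other active value means the speculative bias must be backed out.
 */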
static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}
static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt, but if the count has changed
	 * so that reversing fails, check if there are no waiters;
	 * if none remain, the read lock was in fact granted, so early-out
	 */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!waiter.task)
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, clean up the lock wait
		 */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}
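
/*
 * Handoff sketch: __ldsem_wake_readers() zeroes waiter->task before
 * waking, so a reader that wakes (or times out) distinguishes "granted"
 * from "still waiting" by testing its own waiter:
 *
 *	if (!waiter.task)
 *		... the lock was handed off by the waker ...
 */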
/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt, but if the count has changed
	 * so that reversing fails, check if we now own the lock,
	 * and early-out if so
	 */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if ((locked = writer_trylock(sem)))
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}
static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}
static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}
/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
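
/*
 * Example (a hypothetical caller, for illustration): trylock paths must
 * cope with failure without sleeping:
 *
 *	if (!ldsem_down_write_trylock(&tty->ldisc_sem))
 *		return -EBUSY;
 */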
/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	/* wake waiters only when the last active lock drops and
	 * waiters are accounted (count < 0)
	 */
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}
/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	/* a negative count here means waiters remain to be woken */
	if (count < 0)
		ldsem_wake(sem);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif