// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wake up is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and eventually interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"
/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS \
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

/* "Might sleep" debug check for the rtlock lock operations. */
#define rtlock_might_resched() \
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
38 static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
40 if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
44 static __always_inline void __rt_spin_lock(spinlock_t *lock)
46 rtlock_might_resched();
47 rtlock_lock(&lock->lock);
52 void __sched rt_spin_lock(spinlock_t *lock)
54 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
57 EXPORT_SYMBOL(rt_spin_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep variant with an explicit subclass for nested locking. */
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

/* Lockdep variant which validates against an outer @nest_lock. */
void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif
76 void __sched rt_spin_unlock(spinlock_t *lock)
78 spin_release(&lock->dep_map, _RET_IP_);
82 if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
83 rt_mutex_slowunlock(&lock->lock);
85 EXPORT_SYMBOL(rt_spin_unlock);
88 * Wait for the lock to get unlocked: instead of polling for an unlock
89 * (like raw spinlocks do), lock and unlock, to force the kernel to
90 * schedule if there's contention:
92 void __sched rt_spin_lock_unlock(spinlock_t *lock)
97 EXPORT_SYMBOL(rt_spin_lock_unlock);
99 static __always_inline int __rt_spin_trylock(spinlock_t *lock)
103 if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
104 ret = rt_mutex_slowtrylock(&lock->lock);
107 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
114 int __sched rt_spin_trylock(spinlock_t *lock)
116 return __rt_spin_trylock(lock);
118 EXPORT_SYMBOL(rt_spin_trylock);
120 int __sched rt_spin_trylock_bh(spinlock_t *lock)
125 ret = __rt_spin_trylock(lock);
130 EXPORT_SYMBOL(rt_spin_trylock_bh);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Initialize the lockdep map of an RT spinlock. @percpu selects the
 * per-CPU lock class type so lockdep treats per-CPU locks correctly.
 */
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif
/*
 * RT-specific reader/writer locks
 *
 * Hook the rwbase_rt template's wait-state handling up to the rtlock
 * save/restore helpers, which preserve the task state across blocking.
 */
#define rwbase_set_and_save_current_state(state) \
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state() \
	current_restore_rtlock_saved_state()
154 static __always_inline int
155 rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
157 if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
158 rtlock_slowlock(rtm);
162 static __always_inline int
163 rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
165 rtlock_slowlock_locked(rtm);
169 static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
171 if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
174 rt_mutex_slowunlock(rtm);
177 static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
179 if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
182 return rt_mutex_slowtrylock(rtm);
185 #define rwbase_signal_pending_state(state, current) (0)
187 #define rwbase_schedule() \
190 #include "rwbase_rt.c"
192 * The common functions which get wrapped into the rwlock API.
194 int __sched rt_read_trylock(rwlock_t *rwlock)
198 ret = rwbase_read_trylock(&rwlock->rwbase);
200 rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
206 EXPORT_SYMBOL(rt_read_trylock);
208 int __sched rt_write_trylock(rwlock_t *rwlock)
212 ret = rwbase_write_trylock(&rwlock->rwbase);
214 rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
220 EXPORT_SYMBOL(rt_write_trylock);
222 void __sched rt_read_lock(rwlock_t *rwlock)
224 rtlock_might_resched();
225 rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
226 rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
230 EXPORT_SYMBOL(rt_read_lock);
232 void __sched rt_write_lock(rwlock_t *rwlock)
234 rtlock_might_resched();
235 rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
236 rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
240 EXPORT_SYMBOL(rt_write_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep variant of rt_write_lock() with an explicit subclass. */
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif
254 void __sched rt_read_unlock(rwlock_t *rwlock)
256 rwlock_release(&rwlock->dep_map, _RET_IP_);
259 rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
261 EXPORT_SYMBOL(rt_read_unlock);
263 void __sched rt_write_unlock(rwlock_t *rwlock)
265 rwlock_release(&rwlock->dep_map, _RET_IP_);
268 rwbase_write_unlock(&rwlock->rwbase);
270 EXPORT_SYMBOL(rt_write_unlock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Initialize the lockdep map of an RT rwlock. */
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif