kernel/locking/rwbase_rt.c
// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) Unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * Step #3 of down_read()/read_lock() has the consequence that rw semaphores
 * and rw locks on RT are not writer fair: as long as a writer has not fully
 * acquired the lock, incoming readers keep taking it for read. Writers,
 * which should be avoided in RT tasks anyway (think mmap_sem), are at least
 * subject to the rtmutex priority/DL inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one-by-one reader boosting/handover mechanism would be
 * major surgery for very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide the ACQUIRE and RELEASE
 * semantics of rwbase_rt. The atomic ops must therefore be _acquire()
 * and _release() variants (or stronger).
 *
 * This is the common code shared between the RT rw_semaphore and rwlock
 * implementations.
 */

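/*
 * Usage sketch (illustrative, not part of the original file): rwbase_rt.c is
 * a template. Its users -- the PREEMPT_RT rw_semaphore and rwlock
 * implementations -- first define the rwbase_*() hooks used below
 * (rwbase_rtmutex_lock_state(), rwbase_rtmutex_slowlock_locked(),
 * rwbase_rtmutex_unlock(), rwbase_rtmutex_trylock(),
 * rwbase_signal_pending_state(), rwbase_schedule(),
 * rwbase_set_and_save_current_state(), rwbase_restore_current_state())
 * as macros mapping onto their rtmutex and scheduler primitives, and then
 * include this file. Roughly, for the rw_semaphore flavour (exact macro
 * bodies differ between kernel versions):
 *
 *      #define rwbase_schedule()       schedule()
 *      #define rwbase_signal_pending_state(state, current)     \
 *              signal_pending_state(state, current)
 *      ... remaining rwbase_*() hooks ...
 *      #include "rwbase_rt.c"
 */
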
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
        int r;

        /*
         * Increment the reader count, if rwb->readers < 0, i.e. READER_BIAS
         * is set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
                if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
                        return 1;
        }
        return 0;
}

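/*
 * Reader slow path, called when the fast path in rwbase_read_trylock()
 * failed, i.e. a writer has removed READER_BIAS. Returns 0 on success or a
 * negative error code (e.g. -EINTR for the interruptible/killable rwsem
 * variants) when blocking on the rtmutex was aborted.
 */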
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
                                      unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        int ret;

        raw_spin_lock_irq(&rtm->wait_lock);
        /*
         * Allow readers, as long as the writer has not completely
         * acquired the semaphore for write.
         */
        if (atomic_read(&rwb->readers) != WRITER_BIAS) {
                atomic_inc(&rwb->readers);
                raw_spin_unlock_irq(&rtm->wait_lock);
                return 0;
        }

        /*
         * Call into the slow lock path with the rtmutex->wait_lock
         * held, so this can't result in the following race:
         *
         * Reader1              Reader2         Writer
         *                      down_read()
         *                                      down_write()
         *                                      rtmutex_lock(m)
         *                                      wait()
         * down_read()
         * unlock(m->wait_lock)
         *                      up_read()
         *                      wake(Writer)
         *                                      lock(m->wait_lock)
         *                                      sem->writelocked=true
         *                                      unlock(m->wait_lock)
         *
         *                                      up_write()
         *                                      sem->writelocked=false
         *                                      rtmutex_unlock(m)
         *                      down_read()
         *                                      down_write()
         *                                      rtmutex_lock(m)
         *                                      wait()
         * rtmutex_lock(m)
         *
         * That would put Reader1 behind the writer waiting on
         * Reader2 to call up_read(), which might be unbounded.
         */

        trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

        /*
         * For rwlocks this returns 0 unconditionally, so the below
         * !ret conditionals are optimized out.
         */
        ret = rwbase_rtmutex_slowlock_locked(rtm, state);

        /*
         * On success the rtmutex is held, so there can't be a writer
         * active. Increment the reader count and immediately drop the
         * rtmutex again.
         *
         * rtmutex->wait_lock has to be unlocked in any case of course.
         */
        if (!ret)
                atomic_inc(&rwb->readers);
        raw_spin_unlock_irq(&rtm->wait_lock);
        if (!ret)
                rwbase_rtmutex_unlock(rtm);

        trace_contention_end(rwb, ret);
        return ret;
}

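/* Reader lock: try the fast path first, otherwise take the slow path. */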
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
                                            unsigned int state)
{
        if (rwbase_read_trylock(rwb))
                return 0;

        return __rwbase_read_lock(rwb, state);
}

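/*
 * Reader unlock slow path: the last active reader leaves while a writer is
 * waiting for the lock. Wake the rtmutex owner (the writer) so it can
 * complete the write-side acquisition.
 */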
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
                                         unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        struct task_struct *owner;
        DEFINE_RT_WAKE_Q(wqh);

        raw_spin_lock_irq(&rtm->wait_lock);
        /*
         * Wake the writer, i.e. the rtmutex owner. It might release the
         * rtmutex concurrently in the fast path (due to a signal), but to
         * clean up rwb->readers it needs to acquire rtm->wait_lock. The
         * worst case which can happen is a spurious wakeup.
         */
        owner = rt_mutex_owner(rtm);
        if (owner)
                rt_mutex_wake_q_add_task(&wqh, owner, state);

        /* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
        preempt_disable();
        raw_spin_unlock_irq(&rtm->wait_lock);
        rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
                                               unsigned int state)
{
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
         *
         * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
}

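/*
 * Common write-side release: add READER_BIAS - bias back to rwb->readers
 * and drop the rtmutex. The bias argument encodes the caller's intent:
 * WRITER_BIAS for a plain write unlock, WRITER_BIAS - 1 for a downgrade
 * (one reader stays accounted), and 0 for backing out of a write
 * acquisition that only managed to remove READER_BIAS.
 */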
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
                                         unsigned long flags)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;

        /*
         * _release() is needed in case a reader is in the fast path, pairing
         * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
         */
        (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
}

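/* up_write()/write_unlock(): restore READER_BIAS and release the rtmutex. */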
static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        __rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

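/*
 * Downgrade a held write lock to a read lock: READER_BIAS is restored and
 * the current task stays accounted as one active reader while the rtmutex
 * is released.
 */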
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        /* Release it and account current as reader */
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
        /* Can do without CAS because we're serialized by wait_lock. */
        lockdep_assert_held(&rwb->rtmutex.wait_lock);

        /*
         * _acquire is needed in case a reader is in the fast path; it pairs
         * with the RELEASE in rwbase_read_unlock() and provides ACQUIRE.
         */
        if (!atomic_read_acquire(&rwb->readers)) {
                atomic_set(&rwb->readers, WRITER_BIAS);
                return true;
        }

        return false;
}

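/*
 * Write lock: take the rtmutex, remove READER_BIAS to push readers into
 * the slow path, then wait under rtmutex::wait_lock until the reader count
 * drops to zero and the lock can be marked write locked.
 */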
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        /* Take the rtmutex as a first step */
        if (rwbase_rtmutex_lock_state(rtm, state))
                return -EINTR;

        /* Force readers into slow path */
        atomic_sub(READER_BIAS, &rwb->readers);

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        if (__rwbase_write_trylock(rwb))
                goto out_unlock;

        rwbase_set_and_save_current_state(state);
        trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
        for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
                        rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        trace_contention_end(rwb, -EINTR);
                        return -EINTR;
                }

                if (__rwbase_write_trylock(rwb))
                        break;

                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                rwbase_schedule();
                raw_spin_lock_irqsave(&rtm->wait_lock, flags);

                set_current_state(state);
        }
        rwbase_restore_current_state();
        trace_contention_end(rwb, 0);

out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        return 0;
}

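/*
 * Write trylock: succeeds only if the rtmutex can be trylocked and no
 * readers are active. On failure READER_BIAS is restored and the rtmutex
 * is dropped again via __rwbase_write_unlock(rwb, 0, flags).
 */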
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        if (!rwbase_rtmutex_trylock(rtm))
                return 0;

        atomic_sub(READER_BIAS, &rwb->readers);

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
        __rwbase_write_unlock(rwb, 0, flags);
        return 0;
}