kernel/locking/spinlock.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

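/*
 * A sketch of the mechanism (see asm-generic/mmiowb.h): this per-CPU
 * state records whether an MMIO write happened inside a spinlocked
 * section, so that the ordering barrier can be issued when the lock
 * is released.
 */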
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
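
/*
 * For reference, a minimal sketch of one such inline from
 * include/linux/spinlock_api_smp.h (the lockdep/non-preempt case):
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 */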
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)     cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)    cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)     cpu_relax()
#endif
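
/*
 * Illustrative only (hv_yield_to_lock_owner is a hypothetical helper,
 * not a real kernel API): a paravirtualized architecture could override
 * one of these to donate cycles to the vCPU that currently holds the
 * lock instead of busy-waiting, e.g.:
 *
 *	#define arch_spin_relax(l)	hv_yield_to_lock_owner(l)
 */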

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc __raw_##op##_lock(locktype##_t *lock)                   \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
}                                                                       \
                                                                        \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
                                                                        \
        return flags;                                                   \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)               \
{                                                                       \
        _raw_##op##_lock_irqsave(lock);                                 \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)                \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /*                                                      */      \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        /**/                                                            \
        flags = _raw_##op##_lock_irqsave(lock);                         \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
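
/*
 * For reference, the spin instantiation above expands (roughly) to:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *			arch_spin_relax(&lock->raw_lock);
 *		}
 *	}
 *
 * i.e. the trylock is only attempted with preemption disabled, and
 * preemption is re-enabled between attempts so the spinning CPU stays
 * preemptible while it waits.
 */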

#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif

#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
noinline int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
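
/*
 * Usage sketch (foo_lock and the counters are hypothetical, for
 * illustration only): trylock returns nonzero on success and never
 * spins, so callers must handle failure:
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	if (spin_trylock(&foo_lock)) {
 *		foo_count++;
 *		spin_unlock(&foo_lock);
 *	} else {
 *		foo_busy++;	/- lock was contended; fall back -/
 *	}
 */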

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
noinline int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
noinline void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
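
/*
 * Usage sketch (foo and its fields are hypothetical, for illustration
 * only): the _bh variants protect data shared between process context
 * and softirq context by also disabling local softirq processing:
 *
 *	spin_lock_bh(&foo->lock);
 *	foo->stats++;		/- safe against softirqs on this CPU -/
 *	spin_unlock_bh(&foo->lock);
 */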

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
noinline void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_PREEMPT_RT

#ifndef CONFIG_INLINE_READ_TRYLOCK
noinline int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
noinline void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
noinline void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
noinline void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
noinline void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
noinline void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
noinline void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
noinline int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
noinline void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
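/*
 * Without lockdep the subclass is unused: the comma expression below
 * evaluates it for side effects only and takes the plain write lock.
 */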
#define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock)))
#endif

void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
{
        __raw_write_lock_nested(lock, subclass);
}
EXPORT_SYMBOL(_raw_write_lock_nested);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
noinline void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
noinline void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
noinline void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
noinline void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
noinline void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#endif /* !CONFIG_PREEMPT_RT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
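
/*
 * A sketch of what LOCK_CONTENDED does (include/linux/lockdep.h,
 * CONFIG_LOCK_STAT case): try the non-blocking op first and record
 * contention before falling back to the blocking one:
 *
 *	if (!do_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * Without CONFIG_LOCK_STAT it reduces to do_raw_spin_lock(lock).
 */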

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
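
/*
 * Usage sketch (a and b are hypothetical objects, for illustration
 * only): taking two locks of the same lock class needs a nesting
 * annotation, or lockdep would flag a recursive-deadlock false
 * positive:
 *
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&b->lock);
 *	spin_unlock(&a->lock);
 */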

#endif

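/*
 * in_lock_functions - check whether @addr lies within the __lockfunc
 * text section. Architectures' profile_pc() implementations use this
 * to skip over lock-function frames (see the file header); it is
 * marked notrace since it can be called from profiling paths.
 */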
notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);