// SPDX-License-Identifier: GPL-2.0-only
#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H

#ifndef __LINUX_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif
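
/*
 * On PREEMPT_RT the spinlock_t substitution is backed by an rtmutex based
 * sleeping lock (the rt_mutex_base member initialized below): acquiring a
 * lock may block and is subject to priority inheritance, and the lock
 * operations themselves disable neither preemption nor interrupts.
 */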

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                                struct lock_class_key *key, bool percpu);
#else
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                                struct lock_class_key *key, bool percpu)
{
}
#endif

#define spin_lock_init(slock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        rt_mutex_base_init(&(slock)->lock);                     \
        __rt_spin_lock_init(slock, #slock, &__key, false);      \
} while (0)

#define local_spin_lock_init(slock)                             \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        rt_mutex_base_init(&(slock)->lock);                     \
        __rt_spin_lock_init(slock, #slock, &__key, true);       \
} while (0)
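
/*
 * Illustrative only (not part of this header): locks are initialized the
 * same way as on !PREEMPT_RT kernels. 'struct foo' and foo_init() below
 * are hypothetical:
 *
 *        struct foo {
 *                spinlock_t lock;
 *        };
 *
 *        static void foo_init(struct foo *f)
 *        {
 *                spin_lock_init(&f->lock);
 *        }
 */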

extern void rt_spin_lock(spinlock_t *lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
extern void rt_spin_unlock(spinlock_t *lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);

static __always_inline void spin_lock(spinlock_t *lock)
{
        rt_spin_lock(lock);
}
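
/*
 * spin_lock() maps directly onto rt_spin_lock(): on contention the caller
 * may block on the underlying rtmutex (with priority inheritance boosting
 * the current owner) instead of spinning.
 */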

#ifdef CONFIG_LOCKDEP
# define __spin_lock_nested(lock, subclass)                             \
        rt_spin_lock_nested(lock, subclass)

# define __spin_lock_nest_lock(lock, nest_lock)                         \
        do {                                                            \
                typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
                rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);    \
        } while (0)
# define __spin_lock_irqsave_nested(lock, flags, subclass)      \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = 0;                                      \
                __spin_lock_nested(lock, subclass);             \
        } while (0)

#else
 /*
  * Always evaluate the 'subclass' argument so that the compiler does not
  * warn about set-but-not-used variables when building with
  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  */
# define __spin_lock_nested(lock, subclass)     spin_lock(((void)(subclass), (lock)))
# define __spin_lock_nest_lock(lock, subclass)  spin_lock(((void)(subclass), (lock)))
# define __spin_lock_irqsave_nested(lock, flags, subclass)      \
        spin_lock_irqsave(((void)(subclass), (lock)), flags)
#endif

#define spin_lock_nested(lock, subclass)                \
        __spin_lock_nested(lock, subclass)

#define spin_lock_nest_lock(lock, nest_lock)            \
        __spin_lock_nest_lock(lock, nest_lock)

#define spin_lock_irqsave_nested(lock, flags, subclass) \
        __spin_lock_irqsave_nested(lock, flags, subclass)
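
/*
 * Illustrative only: the _nested variants are for taking two locks of the
 * same lockdep class in a known order (SINGLE_DEPTH_NESTING comes from
 * <linux/lockdep.h>). 'parent' and 'child' below are hypothetical objects
 * embedding a spinlock_t:
 *
 *        spin_lock(&parent->lock);
 *        spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *        ...
 *        spin_unlock(&child->lock);
 *        spin_unlock(&parent->lock);
 */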

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        /* Investigate: Drop bh when blocking ? */
        local_bh_disable();
        rt_spin_lock(lock);
}
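
/*
 * On PREEMPT_RT local_bh_disable() serializes against softirq processing,
 * which itself runs in preemptible task context, rather than disabling
 * preemption; blocking in rt_spin_lock() afterwards is therefore possible.
 */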

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        rt_spin_lock(lock);
}

#define spin_lock_irqsave(lock, flags)                   \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                spin_lock(lock);                         \
        } while (0)
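
/*
 * The _irq/_irqsave variants do not disable hardware interrupts on
 * PREEMPT_RT; 'flags' is only type-checked and zeroed so that existing
 * callers build and behave as before. Illustrative usage ('dev' is a
 * hypothetical object embedding a spinlock_t):
 *
 *        unsigned long flags;
 *
 *        spin_lock_irqsave(&dev->lock, flags);
 *        ...
 *        spin_unlock_irqrestore(&dev->lock, flags);
 */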

static __always_inline void spin_unlock(spinlock_t *lock)
{
        rt_spin_unlock(lock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        rt_spin_unlock(lock);
        local_bh_enable();
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        rt_spin_unlock(lock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
                                                   unsigned long flags)
{
        rt_spin_unlock(lock);
}
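
/*
 * All unlock variants collapse to rt_spin_unlock(); spin_unlock_irqrestore()
 * ignores 'flags' because spin_lock_irqsave() never disabled interrupts in
 * the first place.
 */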

#define spin_trylock(lock)                              \
        __cond_lock(lock, rt_spin_trylock(lock))

#define spin_trylock_bh(lock)                           \
        __cond_lock(lock, rt_spin_trylock_bh(lock))

#define spin_trylock_irq(lock)                          \
        __cond_lock(lock, rt_spin_trylock(lock))

#define __spin_trylock_irqsave(lock, flags)             \
({                                                      \
        int __locked;                                   \
                                                        \
        typecheck(unsigned long, flags);                \
        flags = 0;                                      \
        __locked = spin_trylock(lock);                  \
        __locked;                                       \
})

#define spin_trylock_irqsave(lock, flags)               \
        __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
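
/*
 * __cond_lock() is a sparse annotation telling the checker that the lock is
 * held when the trylock expression evaluates to non-zero. Illustrative usage
 * ('dev' is hypothetical):
 *
 *        if (spin_trylock(&dev->lock)) {
 *                ...
 *                spin_unlock(&dev->lock);
 *        }
 */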

#define spin_is_contended(lock)         (((void)(lock), 0))

static inline int spin_is_locked(spinlock_t *lock)
{
        return rt_mutex_base_is_locked(&lock->lock);
}

#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
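
/*
 * spin_is_contended() always reports 0 for the rtmutex based substitution;
 * spin_is_locked() checks whether the underlying rtmutex currently has an
 * owner.
 */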

#include <linux/rwlock_rt.h>

#endif