/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>
/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4
#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)
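/*
 * Illustrative sketch only (not part of this header): the helper below is
 * hypothetical and merely shows how the masks and shifts above decode a raw
 * preempt_count value.
 *
 *	static inline unsigned int pc_hardirq_depth(unsigned int pc)
 *	{
 *		return (pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
 *	}
 *
 * For example, a count of 0x00010102 decodes as one hardirq nesting level,
 * a softirq count of 1 and a preemption disable depth of 2.
 */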
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define nmi_count()	(preempt_count() & NMI_MASK)
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#endif
#define irq_count()	(nmi_count() | hardirq_count() | softirq_count())
/*
 * Macros to retrieve the current execution context:
 *
 * in_nmi()		- We're in NMI context
 * in_hardirq()		- We're in hard IRQ context
 * in_serving_softirq()	- We're in softirq context
 * in_task()		- We're in task context
 */
#define in_nmi()		(nmi_count())
#define in_hardirq()		(hardirq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))
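/*
 * Illustrative sketch only (hypothetical driver code, not part of this
 * header): the context macros above are typically used to pick behaviour
 * that is safe for the current context, e.g. an allocation mode that does
 * not sleep outside of task context.
 *
 *	gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *	buf = kmalloc(len, gfp);
 */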
/*
 * The following macros are deprecated and should not be used in new code:
 * in_irq()       - Obsolete version of in_hardirq()
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
#else
#define PREEMPT_LOCK_OFFSET		0
#endif
/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)
#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
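/*
 * Illustrative sketch only (hypothetical code, not part of this header):
 * the canonical pairing of the macros above, keeping a task on its CPU and
 * non-preemptible while it touches per-cpu state. my_pcpu_stat is made up
 * for the example.
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_pcpu_stat);
 *	preempt_enable();
 *
 * On !CONFIG_PREEMPT_COUNT kernels the pair degrades to compiler barriers,
 * which is exactly what the comment above requires.
 */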
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif
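/*
 * Illustrative sketch only (hypothetical code, not part of this header):
 * a typical user embeds a preempt_notifier in its own per-task structure
 * and recovers it with container_of() from the callbacks. my_vcpu, its
 * fields and my_sched_in/my_sched_out are made up for the example.
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		u64 last_sched_out;
 *	};
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);
 *
 *		vcpu->last_sched_out = ktime_get_ns();
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,	(analogous, not shown)
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&vcpu->pn, &my_preempt_ops);
 *	preempt_notifier_register(&vcpu->pn);
 */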
#ifdef CONFIG_SMP

/*
 * Migrate-Disable and why it is undesired.
 *
 * When a preempted task becomes eligible to run under the ideal model (IOW it
 * becomes one of the M highest priority tasks), it might still have to wait
 * for the preemptee's migrate_disable() section to complete, thereby suffering
 * a reduction in bandwidth for the exact duration of the migrate_disable()
 * section.
 *
 * Per this argument, the change from preempt_disable() to migrate_disable()
 * gets us:
 *
 * - a higher priority task gains reduced wake-up latency; with preempt_disable()
 *   it would have had to wait for the lower priority task.
 *
 * - a lower priority task, which under preempt_disable() could've instantly
 *   migrated away when another CPU becomes available, is now constrained
 *   by the ability to push the higher priority task away, which might itself be
 *   in a migrate_disable() section, reducing its available bandwidth.
 *
 * IOW it trades latency / moves the interference term, but it stays in the
 * system, and as long as it remains unbounded, the system is not fully
 * deterministic.
 *
 *
 * The reason we have it anyway.
 *
 * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
 * number of primitives into becoming preemptible, they would also allow
 * migration. This turns out to break a bunch of per-cpu usage. To this end,
 * all these primitives employ migrate_disable() to restore this implicit
 * assumption.
 *
 * This is a 'temporary' work-around at best. The correct solution is getting
 * rid of the above assumptions and reworking the code to employ explicit
 * per-cpu locking or short preempt-disable regions.
 *
 * The end goal must be to get rid of migrate_disable(), alternatively we need
 * a schedulability theory that does not depend on arbitrary migration.
 *
 *
 * Notes on the implementation.
 *
 * The implementation is particularly tricky since existing code patterns
 * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
 * This means that it cannot use cpus_read_lock() to serialize against hotplug,
 * nor can it easily migrate itself into a pending affinity mask change on
 * migrate_enable().
 *
 * Note: even non-work-conserving schedulers like semi-partitioned depend on
 *       migration, so migrate_disable() is not only a problem for
 *       work-conserving schedulers.
 */
extern void migrate_disable(void);
extern void migrate_enable(void);
#else /* !CONFIG_SMP */

static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }

#endif /* CONFIG_SMP */
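/*
 * Illustrative sketch only (hypothetical code, not part of this header):
 * unlike preempt_disable(), migrate_disable() merely pins the task to its
 * current CPU while leaving it preemptible, which is what the PREEMPT_RT
 * conversions described above rely on for per-cpu data. my_pcpu_state is
 * made up for the example.
 *
 *	migrate_disable();
 *	p = this_cpu_ptr(&my_pcpu_state);
 *	... p stays valid; the task cannot change CPUs here ...
 *	migrate_enable();
 */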
#endif /* __LINUX_PREEMPT_H */