/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"
/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
/* Debugging: see timer_list.c */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}
/* Start the device in periodic mode */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}
/*
 * Check if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}
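/*
 * Typical call path (sketch; the exact chain depends on the kernel
 * version): a timer driver registers its device with
 * clockevents_register_device(), and the tick layer's new-device check
 * then calls tick_check_broadcast_device() to decide whether this
 * device should become (or replace) the broadcast device.
 */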
/* Check if the device is the broadcast device */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n", dev->name);
		dev->broadcast = err_broadcast;
	}
}
/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/* Check if the current cpu is in the mask */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic).
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
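/*
 * Illustration (assumption, not taken from this file): on x86 the
 * dev->broadcast() callback is typically the local APIC driver's
 * broadcast routine, which sends a timer IPI to every cpu in the mask,
 * roughly:
 *
 *	static void lapic_timer_broadcast(const struct cpumask *mask)
 *	{
 *		apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 *	}
 *
 * The receiving cpus then run their local tick handler from that IPI.
 */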
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}
/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/* The device is in periodic mode. No reprogramming necessary: */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have periodic
	 * mode. We read dev->next_event first and add to it when the
	 * event already expired. clockevents_program_event() sets
	 * dev->next_event only when the event is really programmed to
	 * the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}
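/*
 * Usage sketch (assumption; the exact callers vary by platform): code
 * that knows the local timer may stop, e.g. an ACPI idle driver probing
 * deep C-states, switches broadcasting on for a cpu with
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *
 * which reaches tick_broadcast_on_off() through the clockevents
 * notification chain.
 */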
/* Set the periodic handler depending on broadcast on/off */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;

/* Exposed for debugging: see timer_list.c */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}
static int tick_broadcast_set_event(struct clock_event_device *bc,
				    ktime_t expires, int force)
{
	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return clockevents_program_event(bc, expires, force);
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}
/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, tmpmask);
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   handled yet.
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If the event already
		 * expired, repeat the above.
		 */
		if (tick_broadcast_set_event(dev, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
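/*
 * Usage sketch (assumption; the exact idle driver varies): a cpuidle
 * backend whose local timer stops in deep C-states brackets idle entry
 * with
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *	... enter the C-state ...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * which ends up in tick_broadcast_oneshot_control() via the clockevents
 * notification path.
 */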
/*
 * Reset the one shot broadcast for a cpu
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
/* Select oneshot operating mode for the broadcast device */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/* Remove a dead CPU from broadcasting */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/* Check whether the broadcast device is in one shot mode */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
/* Check whether the broadcast device supports oneshot. */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
#endif /* CONFIG_TICK_ONESHOT */

void __init tick_broadcast_init(void)
{
	alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
#endif
}
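/*
 * Note (assumption, based on the GFP_NOWAIT allocations above): this
 * init function is expected to run very early in boot, e.g. from
 * tick_init() in start_kernel(), where sleeping allocations are not
 * allowed yet, hence the cpumasks are allocated with GFP_NOWAIT.
 */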