/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"
26 * Broadcast support for broken x86 hardware, where the local apic
27 * timer stops in C3 state.
30 static struct tick_device tick_broadcast_device;
31 /* FIXME: Use cpumask_var_t. */
32 static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
33 static DECLARE_BITMAP(tmpmask, NR_CPUS);
34 static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
35 static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
/* Forward declaration; defined in the oneshot section below. */
static void tick_broadcast_clear_oneshot(int cpu);
#else
/* No oneshot support: clearing the oneshot bit is a no-op. */
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
44 * Debugging: see timer_list.c
46 struct tick_device *tick_get_broadcast_device(void)
48 return &tick_broadcast_device;
51 struct cpumask *tick_get_broadcast_mask(void)
53 return to_cpumask(tick_broadcast_mask);
/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	/* bc may be NULL when no broadcast device was registered yet */
	if (bc)
		tick_setup_periodic(bc, 1);
}
66 * Check, if the device can be utilized as broadcast device:
68 int tick_check_broadcast_device(struct clock_event_device *dev)
70 if ((tick_broadcast_device.evtdev &&
71 tick_broadcast_device.evtdev->rating >= dev->rating) ||
72 (dev->features & CLOCK_EVT_FEAT_C3STOP))
75 clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
76 tick_broadcast_device.evtdev = dev;
77 if (!cpumask_empty(tick_get_broadcast_mask()))
78 tick_broadcast_start_periodic(dev);
83 * Check, if the device is the broadcast device
85 int tick_is_broadcast_device(struct clock_event_device *dev)
87 return (dev && tick_broadcast_device.evtdev == dev);
/*
 * Last-resort broadcast "function": used when a device depends on
 * broadcast but no broadcast mechanism is available. Warns once.
 */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
96 * Check, if the device is disfunctional and a place holder, which
97 * needs to be handled by the broadcast device.
99 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
104 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
107 * Devices might be registered with both periodic and oneshot
108 * mode disabled. This signals, that the device needs to be
109 * operated from the broadcast device and is a placeholder for
110 * the cpu local device.
112 if (!tick_device_is_functional(dev)) {
113 dev->event_handler = tick_handle_periodic;
115 dev->broadcast = tick_broadcast;
116 if (!dev->broadcast) {
117 pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
119 dev->broadcast = err_broadcast;
121 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
122 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
126 * When the new device is not affected by the stop
127 * feature and the cpu is marked in the broadcast mask
128 * then clear the broadcast bit.
130 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
131 int cpu = smp_processor_id();
133 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
134 tick_broadcast_clear_oneshot(cpu);
137 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
 * Called on the target CPU when a broadcast IPI arrives: run the
 * local device's event handler. Returns 0 on success, -ENODEV when
 * there is no local device, -EINVAL when it has no handler yet.
 */
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
159 * Broadcast the event to the cpus, which are set in the mask (mangled).
161 static void tick_do_broadcast(struct cpumask *mask)
163 int cpu = smp_processor_id();
164 struct tick_device *td;
167 * Check, if the current cpu is in the mask
169 if (cpumask_test_cpu(cpu, mask)) {
170 cpumask_clear_cpu(cpu, mask);
171 td = &per_cpu(tick_cpu_device, cpu);
172 td->evtdev->event_handler(td->evtdev);
175 if (!cpumask_empty(mask)) {
177 * It might be necessary to actually check whether the devices
178 * have different broadcast functions. For now, just use the
179 * one of the first device. This works as long as we have this
180 * misfeature only on x86 (lapic)
182 td = &per_cpu(tick_cpu_device, cpumask_first(mask));
183 td->evtdev->broadcast(mask);
188 * Periodic broadcast:
189 * - invoke the broadcast handlers
191 static void tick_do_periodic_broadcast(void)
193 raw_spin_lock(&tick_broadcast_lock);
195 cpumask_and(to_cpumask(tmpmask),
196 cpu_online_mask, tick_get_broadcast_mask());
197 tick_do_broadcast(to_cpumask(tmpmask));
199 raw_spin_unlock(&tick_broadcast_lock);
203 * Event handler for periodic broadcast ticks
205 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
209 tick_do_periodic_broadcast();
212 * The device is in periodic mode. No reprogramming necessary:
214 if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
218 * Setup the next period for devices, which do not have
219 * periodic mode. We read dev->next_event first and add to it
220 * when the event already expired. clockevents_program_event()
221 * sets dev->next_event only when the event is really
222 * programmed to the device.
224 for (next = dev->next_event; ;) {
225 next = ktime_add(next, tick_period);
227 if (!clockevents_program_event(dev, next, false))
229 tick_do_periodic_broadcast();
234 * Powerstate information: The system enters/leaves a state, where
235 * affected devices might stop
237 static void tick_do_broadcast_on_off(unsigned long *reason)
239 struct clock_event_device *bc, *dev;
240 struct tick_device *td;
244 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
246 cpu = smp_processor_id();
247 td = &per_cpu(tick_cpu_device, cpu);
249 bc = tick_broadcast_device.evtdev;
252 * Is the device not affected by the powerstate ?
254 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
257 if (!tick_device_is_functional(dev))
260 bc_stopped = cpumask_empty(tick_get_broadcast_mask());
263 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
264 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
265 if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
266 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
267 if (tick_broadcast_device.mode ==
268 TICKDEV_MODE_PERIODIC)
269 clockevents_shutdown(dev);
271 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
272 tick_broadcast_force = 1;
274 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
275 if (!tick_broadcast_force &&
276 cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
277 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
278 if (tick_broadcast_device.mode ==
279 TICKDEV_MODE_PERIODIC)
280 tick_setup_periodic(dev, 0);
285 if (cpumask_empty(tick_get_broadcast_mask())) {
287 clockevents_shutdown(bc);
288 } else if (bc_stopped) {
289 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
290 tick_broadcast_start_periodic(bc);
292 tick_broadcast_setup_oneshot(bc);
295 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
299 * Powerstate information: The system enters/leaves a state, where
300 * affected devices might stop.
302 void tick_broadcast_on_off(unsigned long reason, int *oncpu)
304 if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
305 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
306 "offline CPU #%d\n", *oncpu);
308 tick_do_broadcast_on_off(&reason);
312 * Set the periodic handler depending on broadcast on/off
314 void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
317 dev->event_handler = tick_handle_periodic;
319 dev->event_handler = tick_handle_periodic_broadcast;
323 * Remove a CPU from broadcasting
325 void tick_shutdown_broadcast(unsigned int *cpup)
327 struct clock_event_device *bc;
329 unsigned int cpu = *cpup;
331 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
333 bc = tick_broadcast_device.evtdev;
334 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
336 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
337 if (bc && cpumask_empty(tick_get_broadcast_mask()))
338 clockevents_shutdown(bc);
341 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
344 void tick_suspend_broadcast(void)
346 struct clock_event_device *bc;
349 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
351 bc = tick_broadcast_device.evtdev;
353 clockevents_shutdown(bc);
355 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
358 int tick_resume_broadcast(void)
360 struct clock_event_device *bc;
364 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
366 bc = tick_broadcast_device.evtdev;
369 clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);
371 switch (tick_broadcast_device.mode) {
372 case TICKDEV_MODE_PERIODIC:
373 if (!cpumask_empty(tick_get_broadcast_mask()))
374 tick_broadcast_start_periodic(bc);
375 broadcast = cpumask_test_cpu(smp_processor_id(),
376 tick_get_broadcast_mask());
378 case TICKDEV_MODE_ONESHOT:
379 if (!cpumask_empty(tick_get_broadcast_mask()))
380 broadcast = tick_resume_broadcast_oneshot(bc);
384 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
390 #ifdef CONFIG_TICK_ONESHOT
392 /* FIXME: use cpumask_var_t. */
393 static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
396 * Exposed for debugging: see timer_list.c
398 struct cpumask *tick_get_broadcast_oneshot_mask(void)
400 return to_cpumask(tick_broadcast_oneshot_mask);
403 static int tick_broadcast_set_event(ktime_t expires, int force)
405 struct clock_event_device *bc = tick_broadcast_device.evtdev;
407 if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
408 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
410 return clockevents_program_event(bc, expires, force);
413 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
415 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
420 * Called from irq_enter() when idle was interrupted to reenable the
423 void tick_check_oneshot_broadcast(int cpu)
425 if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
426 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
428 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
433 * Handle oneshot mode broadcasting
435 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
437 struct tick_device *td;
438 ktime_t now, next_event;
441 raw_spin_lock(&tick_broadcast_lock);
443 dev->next_event.tv64 = KTIME_MAX;
444 next_event.tv64 = KTIME_MAX;
445 cpumask_clear(to_cpumask(tmpmask));
447 /* Find all expired events */
448 for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
449 td = &per_cpu(tick_cpu_device, cpu);
450 if (td->evtdev->next_event.tv64 <= now.tv64)
451 cpumask_set_cpu(cpu, to_cpumask(tmpmask));
452 else if (td->evtdev->next_event.tv64 < next_event.tv64)
453 next_event.tv64 = td->evtdev->next_event.tv64;
457 * Wakeup the cpus which have an expired event.
459 tick_do_broadcast(to_cpumask(tmpmask));
462 * Two reasons for reprogram:
464 * - The global event did not expire any CPU local
465 * events. This happens in dyntick mode, as the maximum PIT
466 * delta is quite small.
468 * - There are pending events on sleeping CPUs which were not
471 if (next_event.tv64 != KTIME_MAX) {
473 * Rearm the broadcast device. If event expired,
476 if (tick_broadcast_set_event(next_event, 0))
479 raw_spin_unlock(&tick_broadcast_lock);
483 * Powerstate information: The system enters/leaves a state, where
484 * affected devices might stop
486 void tick_broadcast_oneshot_control(unsigned long reason)
488 struct clock_event_device *bc, *dev;
489 struct tick_device *td;
494 * Periodic mode does not care about the enter/exit of power
497 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
501 * We are called with preemtion disabled from the depth of the
502 * idle code, so we can't be moved away.
504 cpu = smp_processor_id();
505 td = &per_cpu(tick_cpu_device, cpu);
508 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
511 bc = tick_broadcast_device.evtdev;
513 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
514 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
515 if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
516 cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
517 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
518 if (dev->next_event.tv64 < bc->next_event.tv64)
519 tick_broadcast_set_event(dev->next_event, 1);
522 if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
523 cpumask_clear_cpu(cpu,
524 tick_get_broadcast_oneshot_mask());
525 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
526 if (dev->next_event.tv64 != KTIME_MAX)
527 tick_program_event(dev->next_event, 1);
530 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
543 static void tick_broadcast_init_next_event(struct cpumask *mask,
546 struct tick_device *td;
549 for_each_cpu(cpu, mask) {
550 td = &per_cpu(tick_cpu_device, cpu);
552 td->evtdev->next_event = expires;
557 * tick_broadcast_setup_oneshot - setup the broadcast device
559 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
561 int cpu = smp_processor_id();
563 /* Set it up only once ! */
564 if (bc->event_handler != tick_handle_oneshot_broadcast) {
565 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
567 bc->event_handler = tick_handle_oneshot_broadcast;
569 /* Take the do_timer update */
570 tick_do_timer_cpu = cpu;
573 * We must be careful here. There might be other CPUs
574 * waiting for periodic broadcast. We need to set the
575 * oneshot_mask bits for those and program the
576 * broadcast device to fire.
578 cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
579 cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
580 cpumask_or(tick_get_broadcast_oneshot_mask(),
581 tick_get_broadcast_oneshot_mask(),
582 to_cpumask(tmpmask));
584 if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
585 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
586 tick_broadcast_init_next_event(to_cpumask(tmpmask),
588 tick_broadcast_set_event(tick_next_period, 1);
590 bc->next_event.tv64 = KTIME_MAX;
593 * The first cpu which switches to oneshot mode sets
594 * the bit for all other cpus which are in the general
595 * (periodic) broadcast mask. So the bit is set and
596 * would prevent the first broadcast enter after this
597 * to program the bc device.
599 tick_broadcast_clear_oneshot(cpu);
604 * Select oneshot operating mode for the broadcast device
606 void tick_broadcast_switch_to_oneshot(void)
608 struct clock_event_device *bc;
611 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
613 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
614 bc = tick_broadcast_device.evtdev;
616 tick_broadcast_setup_oneshot(bc);
618 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
623 * Remove a dead CPU from broadcasting
625 void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
628 unsigned int cpu = *cpup;
630 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
633 * Clear the broadcast mask flag for the dead cpu, but do not
634 * stop the broadcast device!
636 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
638 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
642 * Check, whether the broadcast device is in one shot mode
644 int tick_broadcast_oneshot_active(void)
646 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
650 * Check whether the broadcast device supports oneshot.
652 bool tick_broadcast_oneshot_available(void)
654 struct clock_event_device *bc = tick_broadcast_device.evtdev;
656 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;