/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
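/*
 * When a cpu's local clock-event device stops in deep idle, the cpu is
 * tracked in tick_broadcast_mask and the broadcast device takes over:
 * its event handler invokes the local device handlers of all affected
 * cpus, see tick_do_broadcast() below.
 */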

static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
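        /*
         * A device which itself stops in deep idle (C3STOP) cannot act
         * as the broadcast device, and a device rated lower than the
         * current broadcast device does not replace it.
         */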
        if ((tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;

        clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
        return 1;
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

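/*
 * Fallback broadcast method, used when a device depends on broadcast
 * but no real broadcast function is available.
 */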
static void err_broadcast(const struct cpumask *mask)
{
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

/*
 * Check whether the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device. Returns 1 when the
 * broadcast device takes the device over.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                if (!dev->broadcast)
                        dev->broadcast = tick_broadcast;
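                /*
                 * Note: tick_broadcast may itself be a NULL stub when
                 * the architecture provides no generic broadcast
                 * implementation, hence the second check below.
                 */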
                if (!dev->broadcast) {
                        pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
                                     dev->name);
                        dev->broadcast = err_broadcast;
                }
                cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
        } else {
                /*
                 * When the new device is not affected by the stop
                 * feature and the cpu is marked in the broadcast mask
                 * then clear the broadcast bit.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                        int cpu = smp_processor_id();

                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        tick_broadcast_clear_oneshot(cpu);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}


#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
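/*
 * Called on the target cpu from the broadcast IPI handler: run the cpu
 * local event handler as if the (stopped) local device had fired.
 */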
int tick_receive_broadcast(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        struct clock_event_device *evt = td->evtdev;

        if (!evt)
                return -ENODEV;

        if (!evt->event_handler)
                return -EINVAL;

        evt->event_handler(evt);
        return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask. Note that
 * the mask is mangled by this function.
 */
static void tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;

        /*
         * Check whether the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use that
                 * of the first device. This works as long as we have this
                 * misfeature only on x86 (lapic).
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
        raw_spin_lock(&tick_broadcast_lock);

        cpumask_and(to_cpumask(tmpmask),
                    cpu_online_mask, tick_get_broadcast_mask());
        tick_do_broadcast(to_cpumask(tmpmask));

        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        ktime_t next;

        tick_do_periodic_broadcast();

        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /*
         * Set up the next period for devices which do not have
         * periodic mode. We read dev->next_event and keep adding the
         * tick period while the event has already expired.
         * clockevents_program_event() sets dev->next_event only when
         * the event is really programmed to the device.
         */
        for (next = dev->next_event; ;) {
                next = ktime_add(next, tick_period);

                if (!clockevents_program_event(dev, next, false))
                        return;
                tick_do_periodic_broadcast();
        }
}


/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu, bc_stopped;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /*
         * Is the device not affected by the powerstate?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

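        /*
         * Remember whether the broadcast device was idle before this
         * change, so it is only started or shut down on a transition
         * of the mask below.
         */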
        bc_stopped = cpumask_empty(tick_get_broadcast_mask());

        switch (*reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                        tick_broadcast_force = 1;
                break;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
                if (!tick_broadcast_force &&
                    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (cpumask_empty(tick_get_broadcast_mask())) {
                if (!bc_stopped)
                        clockevents_shutdown(bc);
        } else if (bc_stopped) {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
        if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
                pr_err("tick-broadcast: ignoring broadcast for offline CPU #%d\n",
                       *oncpu);
        else
                tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_get_broadcast_mask()))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

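/*
 * Shut the broadcast device down across a system suspend transition.
 */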
void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

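/*
 * Restore the broadcast device on resume. The return value tells the
 * caller whether the current cpu is handled by the broadcast device,
 * so the caller can skip resuming the cpu local device.
 */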
int tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;
        int broadcast = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                tick_broadcast_start_periodic(bc);
                        broadcast = cpumask_test_cpu(smp_processor_id(),
                                                     tick_get_broadcast_mask());
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                broadcast = tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return to_cpumask(tick_broadcast_oneshot_mask);
}

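/*
 * Program the broadcast device for @expires, switching it to oneshot
 * mode first if necessary.
 */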
static int tick_broadcast_set_event(ktime_t expires, int force)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

        return clockevents_program_event(bc, expires, force);
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
        return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to re-enable the
 * per-cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
        if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
        }
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu;

        raw_spin_lock(&tick_broadcast_lock);
again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(to_cpumask(tmpmask));
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpumask_set_cpu(cpu, to_cpumask(tmpmask));
                else if (td->evtdev->next_event.tv64 < next_event.tv64)
                        next_event.tv64 = td->evtdev->next_event.tv64;
        }

        /*
         * Wake up the cpus which have an expired event.
         */
        tick_do_broadcast(to_cpumask(tmpmask));

        /*
         * Two reasons for reprogramming:
         *
         * - The global event did not expire any CPU local
         * events. This happens in dyntick mode, as the maximum PIT
         * delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         * in the event mask.
         */
        if (next_event.tv64 != KTIME_MAX) {
                /*
                 * Rearm the broadcast device. If the event expired,
                 * repeat the above.
                 */
                if (tick_broadcast_set_event(next_event, 0))
                        goto again;
        }
        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu;

        /*
         * Periodic mode does not care about the enter/exit of power
         * states.
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                return;

        /*
         * We are called with preemption disabled from the depth of the
         * idle code, so we can't be moved away.
         */
        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return;

        bc = tick_broadcast_device.evtdev;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
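                        /*
                         * Make sure the broadcast device fires no later
                         * than the next event of the cpu local device:
                         */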
                        if (dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(dev->next_event, 1);
                }
        } else {
                if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_clear_cpu(cpu,
                                          tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
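                        /* Re-arm the cpu local device for a pending event: */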
                        if (dev->next_event.tv64 != KTIME_MAX)
                                tick_program_event(dev->next_event, 1);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

                bc->event_handler = tick_handle_oneshot_broadcast;

                /* Take the do_timer update */
                tick_do_timer_cpu = cpu;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
                cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
                cpumask_or(tick_get_broadcast_oneshot_mask(),
                           tick_get_broadcast_oneshot_mask(),
                           to_cpumask(tmpmask));

                if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
                        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
                        tick_broadcast_init_next_event(to_cpumask(tmpmask),
                                                       tick_next_period);
                        tick_broadcast_set_event(tick_next_period, 1);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. That bit would prevent
                 * the first broadcast enter after this cpu switched
                 * over from programming the bc device, so clear it
                 * here.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast mask flag for the dead cpu, but do not
         * stop the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif