tick: Sanitize broadcast control logic
[platform/adaptation/renesas_rcar/renesas_kernel.git] / kernel / time / tick-common.c
index 5d3fb10..64522ec 100644
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -33,7 +34,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
 ktime_t tick_next_period;
 ktime_t tick_period;
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-static DEFINE_RAW_SPINLOCK(tick_device_lock);
 
 /*
  * Debugging: see timer_list.c
@@ -194,7 +194,8 @@ static void tick_setup_device(struct tick_device *td,
         * When global broadcasting is active, check if the current
         * device is registered as a placeholder for broadcast mode.
         * This allows us to handle this x86 misfeature in a generic
-        * way.
+        * way. This function also returns !=0 when we keep the
+        * current active broadcast state for this CPU.
         */
        if (tick_device_uses_broadcast(newdev, cpu))
                return;
@@ -205,17 +206,75 @@ static void tick_setup_device(struct tick_device *td,
                tick_setup_oneshot(newdev, handler, next_event);
 }
 
+void tick_install_replacement(struct clock_event_device *newdev)
+{
+       struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+       int cpu = smp_processor_id();
+
+       clockevents_exchange_device(td->evtdev, newdev);
+       tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
+       if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
+               tick_oneshot_notify();
+}
+
+static bool tick_check_percpu(struct clock_event_device *curdev,
+                             struct clock_event_device *newdev, int cpu)
+{
+       if (!cpumask_test_cpu(cpu, newdev->cpumask))
+               return false;
+       if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
+               return true;
+       /* Check if irq affinity can be set */
+       if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
+               return false;
+       /* Prefer an existing cpu local device */
+       if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
+               return false;
+       return true;
+}
+
+static bool tick_check_preferred(struct clock_event_device *curdev,
+                                struct clock_event_device *newdev)
+{
+       /* Prefer oneshot capable device */
+       if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
+               if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
+                       return false;
+               if (tick_oneshot_mode_active())
+                       return false;
+       }
+
+       /*
+        * Use the higher rated one, but prefer a CPU local device with a lower
+        * rating than a non-CPU local device
+        */
+       return !curdev ||
+               newdev->rating > curdev->rating ||
+              !cpumask_equal(curdev->cpumask, newdev->cpumask);
+}
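
The selection logic that used to sit inline in tick_check_new_device() is now split into two helpers: tick_check_percpu() decides whether the new device may serve this CPU at all (CPU present in its cpumask, irq affinity settable, and an existing CPU local device is never displaced by a global one), while tick_check_preferred() applies the oneshot and rating preferences. The combined effect is easiest to see with two made-up devices; the names, ratings and masks below are purely illustrative and not part of this patch:

/* Hypothetical devices, for illustration only */
static struct clock_event_device global_timer = {
        .name           = "global-timer",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 300,
        /* .cpumask covers all CPUs, i.e. not CPU local */
};

static struct clock_event_device local_timer = {
        .name           = "local-timer",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 100,
        /* .cpumask == cpumask_of(cpu), i.e. CPU local */
};

/*
 * tick_check_preferred(&global_timer, &local_timer) evaluates
 *
 *      !curdev || newdev->rating > curdev->rating ||
 *      !cpumask_equal(curdev->cpumask, newdev->cpumask)
 *
 * as false || false || true == true: the CPU local device wins
 * despite its lower rating. The reverse case (a global newcomer
 * trying to replace a CPU local device) is already rejected by
 * tick_check_percpu() before the rating is looked at.
 */
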
+
+/*
+ * Check whether the new device is a better fit than curdev. curdev
+ * can be NULL!
+ */
+bool tick_check_replacement(struct clock_event_device *curdev,
+                           struct clock_event_device *newdev)
+{
+       if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
+               return false;
+
+       return tick_check_preferred(curdev, newdev);
+}
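
Together with tick_install_replacement() above, tick_check_replacement() backs the clockevents unbind/replace path. That caller is not part of this file; the sketch below only assumes a clockevents_replace() helper in kernel/time/clockevents.c that walks the clockevent_devices list and runs with clockevents_lock held, and is not the verbatim upstream code:

/*
 * Sketch: pick the best unused device as a replacement for 'ced'.
 * Assumed to run with clockevents_lock held.
 */
static void clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;
                if (!try_module_get(dev->owner))
                        continue;

                /* Drop the reference on the previously best candidate */
                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev)
                tick_install_replacement(newdev);
        module_put(ced->owner);
}
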
+
 /*
- * Check, if the new registered device should be used.
+ * Check whether the newly registered device should be used. Called
+ * with clockevents_lock held and interrupts disabled.
  */
-static int tick_check_new_device(struct clock_event_device *newdev)
+void tick_check_new_device(struct clock_event_device *newdev)
 {
        struct clock_event_device *curdev;
        struct tick_device *td;
-       int cpu, ret = NOTIFY_OK;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&tick_device_lock, flags);
+       int cpu;
 
        cpu = smp_processor_id();
        if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -225,40 +284,15 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        curdev = td->evtdev;
 
        /* cpu local device ? */
-       if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
-
-               /*
-                * If the cpu affinity of the device interrupt can not
-                * be set, ignore it.
-                */
-               if (!irq_can_set_affinity(newdev->irq))
-                       goto out_bc;
+       if (!tick_check_percpu(curdev, newdev, cpu))
+               goto out_bc;
 
-               /*
-                * If we have a cpu local device already, do not replace it
-                * by a non cpu local device
-                */
-               if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
-                       goto out_bc;
-       }
+       /* Preference decision */
+       if (!tick_check_preferred(curdev, newdev))
+               goto out_bc;
 
-       /*
-        * If we have an active device, then check the rating and the oneshot
-        * feature.
-        */
-       if (curdev) {
-               /*
-                * Prefer one shot capable devices !
-                */
-               if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
-                   !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
-                       goto out_bc;
-               /*
-                * Check the rating
-                */
-               if (curdev->rating >= newdev->rating)
-                       goto out_bc;
-       }
+       if (!try_module_get(newdev->owner))
+               return;
 
        /*
         * Replace the eventually existing device by the new
@@ -273,20 +307,13 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
-
-       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
-       return NOTIFY_STOP;
+       return;
 
 out_bc:
        /*
         * Can the new device be used as a broadcast device ?
         */
-       if (tick_check_broadcast_device(newdev))
-               ret = NOTIFY_STOP;
-
-       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
-
-       return ret;
+       tick_install_broadcast_device(newdev);
 }
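
tick_check_new_device() no longer returns a NOTIFY_* value because it is no longer invoked through a notifier chain: the clockevents core calls it directly while holding clockevents_lock with interrupts disabled. A sketch of that call site, assuming it lives in clockevents_register_device() in kernel/time/clockevents.c (simplified, not the verbatim code):

void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);     /* was CLOCK_EVT_NOTIFY_ADD */
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}

This is also why tick_device_lock can go away below: every path into these functions is expected to be serialized by clockevents_lock.
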
 
 /*
@@ -294,7 +321,7 @@ out_bc:
  *
  * Called with interrupts disabled.
  */
-static void tick_handover_do_timer(int *cpup)
+void tick_handover_do_timer(int *cpup)
 {
        if (*cpup == tick_do_timer_cpu) {
                int cpu = cpumask_first(cpu_online_mask);
@@ -311,13 +338,11 @@ static void tick_handover_do_timer(int *cpup)
  * access the hardware device itself.
  * We just set the mode and remove it from the lists.
  */
-static void tick_shutdown(unsigned int *cpup)
+void tick_shutdown(unsigned int *cpup)
 {
        struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
        struct clock_event_device *dev = td->evtdev;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&tick_device_lock, flags);
        td->mode = TICKDEV_MODE_PERIODIC;
        if (dev) {
                /*
@@ -329,26 +354,20 @@ static void tick_shutdown(unsigned int *cpup)
                dev->event_handler = clockevents_handle_noop;
                td->evtdev = NULL;
        }
-       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_suspend(void)
+void tick_suspend(void)
 {
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&tick_device_lock, flags);
        clockevents_shutdown(td->evtdev);
-       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_resume(void)
+void tick_resume(void)
 {
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-       unsigned long flags;
        int broadcast = tick_resume_broadcast();
 
-       raw_spin_lock_irqsave(&tick_device_lock, flags);
        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
 
        if (!broadcast) {
@@ -357,68 +376,12 @@ static void tick_resume(void)
                else
                        tick_resume_oneshot();
        }
-       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-/*
- * Notification about clock event devices
- */
-static int tick_notify(struct notifier_block *nb, unsigned long reason,
-                              void *dev)
-{
-       switch (reason) {
-
-       case CLOCK_EVT_NOTIFY_ADD:
-               return tick_check_new_device(dev);
-
-       case CLOCK_EVT_NOTIFY_BROADCAST_ON:
-       case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-       case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
-               tick_broadcast_on_off(reason, dev);
-               break;
-
-       case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
-       case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-               tick_broadcast_oneshot_control(reason);
-               break;
-
-       case CLOCK_EVT_NOTIFY_CPU_DYING:
-               tick_handover_do_timer(dev);
-               break;
-
-       case CLOCK_EVT_NOTIFY_CPU_DEAD:
-               tick_shutdown_broadcast_oneshot(dev);
-               tick_shutdown_broadcast(dev);
-               tick_shutdown(dev);
-               break;
-
-       case CLOCK_EVT_NOTIFY_SUSPEND:
-               tick_suspend();
-               tick_suspend_broadcast();
-               break;
-
-       case CLOCK_EVT_NOTIFY_RESUME:
-               tick_resume();
-               break;
-
-       default:
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block tick_notifier = {
-       .notifier_call = tick_notify,
-};
-
 /**
  * tick_init - initialize the tick control
- *
- * Register the notifier with the clockevents framework
  */
 void __init tick_init(void)
 {
-       clockevents_register_notifier(&tick_notifier);
        tick_broadcast_init();
 }