/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>

/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

/*
 * Get a global nanosecond time stamp for tracing.
 */
static s64 get_ns(void)
{
	struct timespec ts;
	getnstimeofday(&ts);
	return timespec_to_ns(&ts);
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;
	long volatile handshake, **handshake_ptr = _arg;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/* Advertise our handshake location */
	if (handshake_ptr) {
		handshake = 0;
		*handshake_ptr = &handshake;
	} else
		handshake = -1;

	/*
	 * Our state has been saved at this point.  Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend().  It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting.  This is why we need stack isolation.
	 *
	 * Fancy under cover tasks could be performed here.  For now
	 * we have none.
	 */

	/*
	 * Let's wait until our inbound is alive.
	 */
	while (!handshake) {
		wfe();
		smp_mb();
	}

	/* Let's put ourself down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
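	/*
	 * Each cluster gets its own STACK_SIZE window just above the
	 * thread_info.  Since ARM stacks grow downward, the pointer handed
	 * to call_with_stack() must be the *top* of the chosen window,
	 * hence the extra STACK_SIZE term added below.
	 */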
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}

/*
 * Generic switcher interface
 */

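/*
 * bL_gic_id[] records the GIC interface ID of each physical CPU, while
 * bL_switcher_cpu_pairing[] maps each logical CPU to the logical CPU it
 * is paired with on the other cluster (or -1 when unpaired).  Both are
 * populated by bL_switcher_halve_cpus() below.
 */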
static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct completion inbound_alive;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	long volatile *handshake_ptr;
	int ipi_nr, ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	this_cpu = smp_processor_id();

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/* Install our "inbound alive" notifier. */
	init_completion(&inbound_alive);
	ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
	ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
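	/*
	 * The inbound CPU will write this value to the GIC SGIR register as
	 * soon as it is up: the SGI number lives in bits [3:0] and the
	 * target CPU list in bits [23:16], here aimed back at the outbound
	 * CPU so that the completion above is triggered.
	 */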
	mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * Raise a SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in bL_power_down().
	 */
	gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

	/*
	 * Wait for the inbound to come up.  This allows for other
	 * tasks to be scheduled in the mean time.
	 */
	wait_for_completion(&inbound_alive);
	mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

	/*
	 * From this point we are entering the switch critical zone
	 * and can't sleep/schedule anymore.
	 */
	local_irq_disable();
	local_fiq_disable();
	trace_cpu_migrate_begin(get_ns(), ob_mpidr);

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we can not tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/*
	 * Swap the physical CPUs in the logical map for this logical CPU.
	 * This must be flushed to RAM as the resume code
	 * needs to access it while the caches are still disabled.
	 */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;
	sync_cache_w(&cpu_logical_map(this_cpu));

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
	BUG_ON(mpidr != ib_mpidr);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	trace_cpu_migrate_finish(get_ns(), ib_mpidr);
	local_fiq_enable();
	local_irq_enable();

	*handshake_ptr = 1;
	dsb_sev();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct bL_thread {
	spinlock_t lock;
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
	bL_switch_completion_handler completer;
	void *completer_cookie;
};

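/*
 * One switcher thread is created per logical CPU by bL_switcher_enable()
 * and bound to that CPU with kthread_bind(), ensuring bL_switch_to()
 * always executes on the very CPU being switched.
 */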
static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;
	bL_switch_completion_handler completer;
	void *completer_cookie;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());

		spin_lock(&t->lock);
		cluster = t->wanted_cluster;
		completer = t->completer;
		completer_cookie = t->completer_cookie;
		t->wanted_cluster = -1;
		t->completer = NULL;
		spin_unlock(&t->lock);

		if (cluster != -1) {
			bL_switch_to(cluster);

			if (completer)
				completer(completer_cookie);
		}
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *	with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  if non-NULL,
 *	@completer(@completer_cookie) will be called on completion of
 *	the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
			 bL_switch_completion_handler completer,
			 void *completer_cookie)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];

	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	spin_lock(&t->lock);
	if (t->completer) {
		spin_unlock(&t->lock);
		return -EBUSY;
	}
	t->completer = completer;
	t->completer_cookie = completer_cookie;
	t->wanted_cluster = new_cluster_id;
	spin_unlock(&t->lock);
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
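
/*
 * A minimal usage sketch (hypothetical caller, names invented for
 * illustration only):
 *
 *	static void my_switch_done(void *cookie)
 *	{
 *		complete(cookie);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	if (!bL_switch_request_cb(cpu, new_cluster_id, my_switch_done, &done))
 *		wait_for_completion(&done);
 */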

/*
 * Detach an outstanding switch request.
 *
 * The switcher will continue with the switch request in the background,
 * but the completer function will not be called.
 *
 * This may be necessary if the completer is in a kernel module which is
 * about to be unloaded.
 */
void bL_switch_request_detach(unsigned int cpu,
			      bL_switch_completion_handler completer)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return;
	}

	t = &bL_threads[cpu];

	if (IS_ERR(t->task) || !t->task)
		return;

	spin_lock(&t->lock);
	if (t->completer == completer)
		t->completer = NULL;
	spin_unlock(&t->lock);
}
EXPORT_SYMBOL_GPL(bL_switch_request_detach);

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
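
/*
 * Callbacks registered above are invoked with the BL_NOTIFY_PRE_ENABLE,
 * BL_NOTIFY_POST_ENABLE, BL_NOTIFY_PRE_DISABLE and BL_NOTIFY_POST_DISABLE
 * events issued around the activation changes performed below.
 */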

static int bL_activation_notify(unsigned long val)
{
	int ret;

	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
	if (ret & NOTIFY_STOP_MASK)
		pr_err("%s: notifier chain failed with status 0x%x\n",
			__func__, ret);
	return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
		cpu_up(i);
}

static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing.  We match each CPU with another CPU
	 * from a different cluster.  To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairing on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}

	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

		ret = cpu_down(i);
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
	int cpu;

	if (!bL_switcher_active)
		return -EUNATCH;

	mpidr &= MPIDR_HWID_BITMASK;
	for_each_online_cpu(cpu) {
		int pairing = bL_switcher_cpu_pairing[cpu];
		if (pairing == -1)
			continue;
		if ((mpidr == cpu_logical_map(cpu)) ||
		    (mpidr == cpu_logical_map(pairing)))
			return cpu;
	}
	return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
	trace_cpu_migrate_current(get_ns(), read_mpidr());
}
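
/*
 * Re-emit the current CPU/MPIDR assignment of every online CPU to the
 * trace stream: locally for the calling CPU, and via smp_call_function()
 * for all the others.
 */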
int bL_switcher_trace_trigger(void)
{
	int ret;

	preempt_disable();

	bL_switcher_trace_trigger_cpu(NULL);
	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
	int cpu, ret;

	mutex_lock(&bL_switcher_activation_lock);
	cpu_hotplug_driver_lock();
	if (bL_switcher_active) {
		cpu_hotplug_driver_unlock();
		mutex_unlock(&bL_switcher_activation_lock);
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
	if (ret)
		goto error;

	ret = bL_switcher_halve_cpus();
	if (ret)
		goto error;

	bL_switcher_trace_trigger();

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		spin_lock_init(&t->lock);
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
	pr_info("big.LITTLE switcher initialized\n");
	goto out;

error:
	pr_warning("big.LITTLE switcher initialization failed\n");
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	cpu_hotplug_driver_unlock();
	mutex_unlock(&bL_switcher_activation_lock);
	return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	mutex_lock(&bL_switcher_activation_lock);
	cpu_hotplug_driver_lock();

	if (!bL_switcher_active)
		goto out;

	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
		goto out;
	}

	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for a given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}

		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	bL_switcher_trace_trigger();

	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	cpu_hotplug_driver_unlock();
	mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = bL_switcher_trace_trigger();

	return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
	__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	&bL_switcher_trace_trigger_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};
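
/*
 * These attributes appear under /sys/kernel/bL_switcher/: writing '0' or
 * '1' to "active" disables or enables the switcher, and any write to
 * "trace_trigger" re-emits the current cluster assignments to the trace
 * stream.
 */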
static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}

#endif	/* CONFIG_SYSFS */

bool bL_switcher_get_enabled(void)
{
	mutex_lock(&bL_switcher_activation_lock);

	return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
	mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		if (bL_switcher_active)
			return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block bL_switcher_hotplug_notifier =
	{ &bL_switcher_hotplug_callback, NULL, 0 };

#ifdef CONFIG_SCHED_HMP
static bool no_bL_switcher = true;
#else
static bool no_bL_switcher;
#endif
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
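
/*
 * "no_bL_switcher" may also be set on the kernel command line; core_param()
 * registers it without any module name prefix.  Note it defaults to true
 * (switcher kept off) when CONFIG_SCHED_HMP is enabled.
 */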

static int __init bL_switcher_init(void)
{
	int ret;

	if (MAX_NR_CLUSTERS != 2) {
		pr_err("%s: only dual cluster systems are supported\n", __func__);
		return -EINVAL;
	}

	register_cpu_notifier(&bL_switcher_hotplug_notifier);

	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);