// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>
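
/* Number of times each CPU cycles through the full set of tested idle states. */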
#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);
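
/*
 * The main thread fires suspend_threads_started to release every suspend
 * thread at once; the last thread to finish (nb_active_threads reaching
 * zero) signals suspend_threads_done back to the main thread.
 */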

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}
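
/*
 * down_and_up_cpus() below relies on tos_resident_cpu: powering down the
 * CPU hosting a UP Trusted OS is expected to fail with -EPERM.
 */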

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = remove_cpu(cpu);

		/*
		 * cpu_down() checks the number of online CPUs before the TOS
		 * resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = add_cpu(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
	int i;
	cpumask_var_t *cpu_groups = *pcpu_groups;

	for (i = 0; i < num; ++i)
		free_cpumask_var(cpu_groups[i]);
	kfree(cpu_groups);
	*pcpu_groups = NULL;
}

static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int num_groups = 0;
	cpumask_var_t tmp, *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	/* sizeof(*cpu_groups), not sizeof(cpu_groups): each element is a cpumask_var_t. */
	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups) {
		free_cpumask_var(tmp);
		return -ENOMEM;
	}

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpumask_var(tmp);
			free_cpu_groups(num_groups, &cpu_groups);
			return -ENOMEM;
		}
		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	*pcpu_groups = cpu_groups;

	return num_groups;
}

static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus, *cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
	if (nb_cpu_group < 0)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	err = 0;
	/*
	 * Of course the last CPU cannot be powered down and cpu_down() should
	 * refuse doing that.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}
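
/*
 * The timer interrupt alone is enough to wake a CPU from idle, so the
 * callback has nothing to do.
 */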
static void dummy_callback(struct timer_list *unused) {}

static int suspend_cpu(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];
	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need to enter tick
		 * broadcast.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			index = 0;
		}
	}

	ret = state->enter(dev, drv, index);

	if (broadcast)
		tick_broadcast_exit();

	arch_cpu_idle_exit();

	return ret;
}
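
/*
 * Note that state->enter() returns the index of the idle state actually
 * entered, or a negative error code; suspend_test_thread() below uses
 * this to classify each cycle.
 */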

static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for an actual callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI and
		 * doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			int ret;
			struct cpuidle_state *state = &drv->states[index];

			/*
			 * Set the timer to wake this CPU up in some time (which
			 * should be largely sufficient for entering suspend).
			 * If the local tick is disabled when entering suspend,
			 * suspend_cpu() takes care of switching to a broadcast
			 * tick, so the timer will still wake us up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(dev, drv, index);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the expected state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure that the timer will not trigger
	 * later.
	 */
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	/* Give up on RT scheduling and wait for termination. */
	sched_priority.sched_priority = 0;
	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);

	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			break;
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	kthread_parkme();

	return nb_err;
}
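
/*
 * kthread_stop() in suspend_tests() below retrieves each thread's return
 * value (its error count), which is why the threads park and wait to be
 * stopped instead of exiting on their own.
 */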

static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status). Take the idle lock so that
	 * the cpuidle driver and device look-up can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread)) {
			pr_err("Failed to create kthread on CPU %d\n", cpu);
			continue;
		}
		threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being preempted
	 * before all the threads have been unparked, the suspend threads will
	 * wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i) {
		err += kthread_park(threads[i]);
		err += kthread_stop(threads[i]);
	}
 out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can
	 * be onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using
	 * it, otherwise the results will be unpredictable. However, since
	 * there is no userspace yet in initcalls, that should be fine, as
	 * long as no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);