// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/pm-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "power.h"

#ifdef CONFIG_PM_SLEEP

void lock_system_sleep(void)
{
	current->flags |= PF_FREEZER_SKIP;
	mutex_lock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(lock_system_sleep);

void unlock_system_sleep(void)
{
	/*
	 * Don't use freezer_count() because we don't want the call to
	 * try_to_freeze() here.
	 *
	 * Fundamentally, we just don't need it, because the freezing condition
	 * doesn't come into effect until we release the
	 * system_transition_mutex lock, since the freezer always works with
	 * system_transition_mutex held.
	 *
	 * More importantly, in the case of hibernation,
	 * unlock_system_sleep() gets called in snapshot_read() and
	 * snapshot_write() when the freezing condition is still in effect.
	 * Which means, if we used try_to_freeze() here, it would make them
	 * enter the refrigerator, thus causing hibernation to lock up.
	 */
	current->flags &= ~PF_FREEZER_SKIP;
	mutex_unlock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);

void ksys_sync_helper(void)
{
	ktime_t start;
	long elapsed_msecs;

	start = ktime_get();
	ksys_sync();
	elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
	pr_info("Filesystems sync: %ld.%03ld seconds\n",
		elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
}
EXPORT_SYMBOL_GPL(ksys_sync_helper);

/* Routines for PM-transition notifications */

static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
{
	int ret;

	ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
						nr_to_call, nr_calls);

	return notifier_to_errno(ret);
}

int pm_notifier_call_chain(unsigned long val)
{
	return __pm_notifier_call_chain(val, -1, NULL);
}

/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);
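
/*
 * Example (user-space view, illustrative): asynchronous suspend/resume is
 * enabled by default and can be toggled through the corresponding sysfs
 * file, e.g. "echo 0 > /sys/power/pm_async" to force fully synchronous
 * device suspend/resume, or "echo 1" to restore the default behaviour.
 */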

#ifdef CONFIG_SUSPEND
static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	char *s = buf;
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (mem_sleep_states[i]) {
			const char *label = mem_sleep_states[i];

			if (mem_sleep_current == i)
				s += sprintf(s, "[%s] ", label);
			else
				s += sprintf(s, "%s ", label);
		}

	/* Convert the last space to a newline if needed. */
	if (s != buf)
		*(s-1) = '\n';

	return (s - buf);
}

static suspend_state_t decode_suspend_state(const char *buf, size_t n)
{
	suspend_state_t state;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = mem_sleep_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}

	return PM_SUSPEND_ON;
}

static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_suspend_state(buf, n);
	if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
		mem_sleep_current = state;
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(mem_sleep);
#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_PM_SLEEP_DEBUG
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
				char *buf)
{
	char *s = buf;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				s += sprintf(s, "[%s] ", pm_tests[level]);
			else
				s += sprintf(s, "%s ", pm_tests[level]);
		}

	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';

	return (s - buf);
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	const char * const *s;
	int level;
	char *p;
	int len;
	int error = -EINVAL;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep();

	return error ? error : n;
}

power_attr(pm_test);
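
/*
 * Example (user-space view, illustrative): the labels listed by
 * pm_test_show() select a suspend test level, e.g.
 * "echo devices > /sys/power/pm_test" followed by
 * "echo mem > /sys/power/state" exercises the suspend path only up to the
 * chosen point and then resumes, while "echo none > /sys/power/pm_test"
 * restores normal suspend behaviour.
 */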
#endif /* CONFIG_PM_SLEEP_DEBUG */

static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}

#define suspend_attr(_name)					\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sprintf(buf, "%d\n", suspend_stats._name);	\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_attr(success);
suspend_attr(fail);
suspend_attr(failed_freeze);
suspend_attr(failed_prepare);
suspend_attr(failed_suspend);
suspend_attr(failed_suspend_late);
suspend_attr(failed_suspend_noirq);
suspend_attr(failed_resume);
suspend_attr(failed_resume_early);
suspend_attr(failed_resume_noirq);

static ssize_t last_failed_dev_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int index;
	char *last_failed_dev = NULL;

	index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_dev = suspend_stats.failed_devs[index];

	return sprintf(buf, "%s\n", last_failed_dev);
}
static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);

static ssize_t last_failed_errno_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int index;
	int last_failed_errno;

	index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_errno = suspend_stats.errno[index];

	return sprintf(buf, "%d\n", last_failed_errno);
}
static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);

static ssize_t last_failed_step_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int index;
	enum suspend_stat_step step;
	char *last_failed_step = NULL;

	index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	step = suspend_stats.failed_steps[index];
	last_failed_step = suspend_step_name(step);

	return sprintf(buf, "%s\n", last_failed_step);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);

static struct attribute *suspend_attrs[] = {
	&success.attr,
	&fail.attr,
	&failed_freeze.attr,
	&failed_prepare.attr,
	&failed_suspend.attr,
	&failed_suspend_late.attr,
	&failed_suspend_noirq.attr,
	&failed_resume.attr,
	&failed_resume_early.attr,
	&failed_resume_noirq.attr,
	&last_failed_dev.attr,
	&last_failed_errno.attr,
	&last_failed_step.attr,
	NULL,
};

static struct attribute_group suspend_attr_group = {
	.name = "suspend_stats",
	.attrs = suspend_attrs,
};

#ifdef CONFIG_DEBUG_FS
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;
	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);
	seq_printf(s, "failures:\n  last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
				suspend_stats.failed_devs[index]);
	}
	seq_printf(s, "  last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
				suspend_stats.errno[index]);
	}
	seq_printf(s, "  last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
				suspend_step_name(
					suspend_stats.failed_steps[index]));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(suspend_stats);

static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_fops);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
/*
 * pm_print_times: print time taken by devices to suspend and resume.
 *
 * show() returns whether printing of suspend and resume times is enabled.
 * store() accepts 0 or 1.  0 disables printing and 1 enables it.
 */
bool pm_print_times_enabled;

static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	if (val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);

static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}

static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
}

power_attr_ro(pm_wakeup_irq);

bool pm_debug_messages_on __read_mostly;

static ssize_t pm_debug_messages_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_debug_messages_on);
}

static ssize_t pm_debug_messages_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	if (val > 1)
		return -EINVAL;

	pm_debug_messages_on = !!val;
	return n;
}

power_attr(pm_debug_messages);

/**
 * __pm_pr_dbg - Print a suspend debug message to the kernel log.
 * @defer: Whether or not to use printk_deferred() to print the message.
 * @fmt: Message format.
 *
 * The message will be emitted if enabled through the pm_debug_messages
 * sysfs attribute.
 */
void __pm_pr_dbg(bool defer, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!pm_debug_messages_on)
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (defer)
		printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
	else
		printk(KERN_DEBUG "PM: %pV", &vaf);

	va_end(args);
}
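
/*
 * Example call (illustrative; pm_pr_dbg() is the convenience wrapper in
 * <linux/suspend.h> that expands to __pm_pr_dbg(false, ...), and "elapsed"
 * is a hypothetical local variable):
 *
 *	pm_pr_dbg("device resume took %d ms\n", elapsed);
 *
 * Nothing is printed unless debug messages have been enabled, e.g. via
 * "echo 1 > /sys/power/pm_debug_messages".
 */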

#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */

struct kobject *power_kobj;

/**
 * state - control system sleep states.
 *
 * show() returns available sleep state labels, which may be "mem", "standby",
 * "freeze" and "disk" (hibernation).
 * See Documentation/admin-guide/pm/sleep-states.rst for a description of
 * what they mean.
 *
 * store() accepts one of those strings, translates it into the proper
 * enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (pm_states[i])
			s += sprintf(s, "%s ", pm_states[i]);
#endif
	if (hibernation_available())
		s += sprintf(s, "disk ");
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
	return (s - buf);
}

static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state;
#endif
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && str_has_prefix(buf, "disk"))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
#endif

	return PM_SUSPEND_ON;
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX) {
		if (state == PM_SUSPEND_MEM)
			state = mem_sleep_current;

		error = pm_suspend(state);
	} else if (state == PM_SUSPEND_MAX) {
		error = hibernate();
	} else {
		error = -EINVAL;
	}

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);
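
/*
 * Example (user-space view, illustrative): reading /sys/power/state lists
 * the supported labels, e.g. "freeze mem disk"; writing one of them starts
 * the corresponding transition, e.g. "echo mem > /sys/power/state" suspends
 * the system (to the variant currently selected in /sys/power/mem_sleep)
 * and "echo disk > /sys/power/state" hibernates it.
 */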

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up.  In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted.  Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail during a certain period after the detection
 * of the event.  Using the 'state' attribute alone is not sufficient to
 * satisfy these requirements, because a wakeup event may occur exactly when
 * 'state' is being written to and may be delivered to user space right before
 * it is frozen, so the event will remain only partially processed until the
 * system is woken up by another event.  In particular, it won't cause the
 * transition to a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'.  It first should read from 'wakeup_count' and store
 * the read value.  Then, after carrying out its own preparations for the
 * system transition to a sleep state, it should write the stored value to
 * 'wakeup_count'.  If that fails, at least one wakeup event has occurred
 * since 'wakeup_count' was read and 'state' should not be written to.
 * Otherwise, it is allowed to write to 'state', but the transition will be
 * aborted if there are any wakeup events detected after 'wakeup_count' was
 * written to.
 */
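
/*
 * Illustrative user-space sequence implementing the protocol described above:
 *
 *	count=$(cat /sys/power/wakeup_count)	# may block until events settle
 *	... finish user-space suspend preparations ...
 *	if echo "$count" > /sys/power/wakeup_count; then
 *		echo mem > /sys/power/state	# aborted by new wakeup events
 *	fi	# else a wakeup event has occurred; retry later
 */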

static ssize_t wakeup_count_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sprintf(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
		else
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", pm_states[state] ?
					pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	return sprintf(buf, "error");
#endif
}

static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	if (state == PM_SUSPEND_MEM)
		state = mem_sleep_current;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}

power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}

static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int error = pm_wake_lock(buf);
	return error ? error : n;
}

power_attr(wake_lock);

static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}

static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int error = pm_wake_unlock(buf);
	return error ? error : n;
}

power_attr(wake_unlock);
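
/*
 * Example (user-space view, illustrative):
 * "echo mylock > /sys/power/wake_lock" activates a wakeup source named
 * "mylock" (an optional timeout in nanoseconds may follow the name) and
 * "echo mylock > /sys/power/wake_unlock" releases it; reading the two files
 * lists the active and inactive wakelocks, respectively.
 */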

#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		pm_trace_enabled = !!val;
		if (pm_trace_enabled) {
			pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
				"PM: Correct system time has to be restored manually after resume.\n");
		}
		return n;
	}
	return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

power_attr_ro(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", freeze_timeout_msecs);
}

static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	freeze_timeout_msecs = val;
	return n;
}

power_attr(pm_freeze_timeout);

#endif /* CONFIG_FREEZER */

static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
	&mem_sleep_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_test_attr.attr,
	&pm_print_times_attr.attr,
	&pm_wakeup_irq_attr.attr,
	&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = g,
};

static const struct attribute_group *attr_groups[] = {
	&attr_group,
#ifdef CONFIG_PM_SLEEP
	&suspend_attr_group,
#endif
	NULL,
};

struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}

static int __init pm_init(void)
{
	int error = pm_start_workqueue();

	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	pm_states_init();
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_groups(power_kobj, attr_groups);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}

core_initcall(pm_init);