// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);
#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to
	 * resume it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
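/*
 * Illustrative sketch (not part of this file): pm_op() and its variants below
 * pick callbacks out of a struct dev_pm_ops that a driver registers with its
 * subsystem.  A minimal driver-side setup might look like the block below;
 * the "foo" names and the platform-bus choice are hypothetical, and the block
 * is deliberately not compiled here.
 */
#if 0
static int foo_suspend(struct device *dev)
{
	/* Quiesce the hardware; pm_op() returns this for PM_EVENT_SUSPEND. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Re-program the hardware; pm_op() returns this for PM_EVENT_RESUME. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};
#endif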
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule_dev(func, dev);
		return true;
	}

	return false;
}
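/*
 * Illustrative sketch (not part of this file): the power.async_suspend flag
 * that is_async() checks above is opted into per device, typically at probe
 * time, with device_enable_async_suspend(); the global switch behind
 * pm_async_enabled is /sys/power/pm_async.  The "foo" name is hypothetical
 * and the block is not compiled here.
 */
#if 0
static int foo_probe(struct device *dev)
{
	/* No ordering hazards: let this device suspend/resume in parallel. */
	device_enable_async_suspend(dev);
	return 0;
}
#endif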
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
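/*
 * Illustrative sketch (not part of this file): the system suspend core in
 * kernel/power/ drives the phases exported above and below in pairs.  The
 * outline here is a simplified, hypothetical rendition of that sequence,
 * with error handling and platform hooks omitted.
 */
#if 0
	error = dpm_suspend_start(PMSG_SUSPEND);	/* prepare + suspend */
	if (!error)
		error = dpm_suspend_end(PMSG_SUSPEND);	/* late + noirq */

	/* ... the platform enters and leaves the sleep state here ... */

	dpm_resume_start(PMSG_RESUME);			/* noirq + early */
	dpm_resume_end(PMSG_RESUME);			/* resume + complete */
#endif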
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
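/*
 * Illustrative sketch (not part of this file): a driver that can stay
 * runtime-suspended across a system transition may return a positive value
 * from its ->prepare() callback, which feeds the direct_complete evaluation
 * above.  The "foo" name is hypothetical and the block is not compiled here.
 */
#if 0
static int foo_prepare(struct device *dev)
{
	/* 1 == "if I'm runtime-suspended, leave me that way". */
	return pm_runtime_suspended(dev);
}
#endif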
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disabling probing of devices. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices will happen during suspend or
	 * hibernation and system behavior will be unpredictable in this case.
	 * So, let's prohibit device probing here and defer their probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
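/*
 * Illustrative sketch (not part of this file): a driver whose suspend
 * callback must not proceed until some other, non-ancestor device has
 * finished its own transition can order itself explicitly with the helper
 * above.  The "foo" and "peer" names are hypothetical and the block is not
 * compiled here.
 */
#if 0
static int foo_suspend(struct device *dev)
{
	struct foo_data *foo = dev_get_drvdata(dev);

	/* Wait for the peer device's callback in the current phase. */
	return device_pm_wait_for_dev(dev, foo->peer);
}
#endif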
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
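/*
 * Illustrative sketch (not part of this file): dpm_for_each_dev() walks
 * dpm_list with dpm_list_mtx held, so the callback must not call back into
 * anything that takes that lock.  A hypothetical caller counting devices:
 */
#if 0
static void foo_count_one(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int foo_count_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, foo_count_one);
	return count;
}
#endif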
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
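/*
 * Illustrative sketch (not part of this file): dev_pm_skip_suspend() only
 * returns true for drivers that have opted in with DPM_FLAG_SMART_SUSPEND,
 * usually set once at probe time.  Pairing it with DPM_FLAG_MAY_SKIP_RESUME,
 * as in this hypothetical snippet, also allows the resume side to be skipped,
 * subject to the power.may_skip_resume and power.must_resume checks above.
 */
#if 0
	/* At probe time: */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
#endif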