PM: core: keep irq flags in device_pm_check_callbacks()
[platform/kernel/linux-rpi.git] drivers/base/power/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/cpuidle.h>
36 #include <linux/devfreq.h>
37 #include <linux/timer.h>
38
39 #include "../base.h"
40 #include "power.h"
41
42 typedef int (*pm_callback_t)(struct device *);
43
44 #define list_for_each_entry_rcu_locked(pos, head, member) \
45         list_for_each_entry_rcu(pos, head, member, \
46                         device_links_read_lock_held())
47
48 /*
49  * The entries in dpm_list are in depth-first order, simply
50  * because children are guaranteed to be discovered after parents, and
51  * are inserted at the back of the list on discovery.
52  *
53  * Since device_pm_add() may be called with a device lock held,
54  * we must never try to acquire a device lock while holding
55  * dpm_list_mutex.
56  */
57
58 LIST_HEAD(dpm_list);
59 static LIST_HEAD(dpm_prepared_list);
60 static LIST_HEAD(dpm_suspended_list);
61 static LIST_HEAD(dpm_late_early_list);
62 static LIST_HEAD(dpm_noirq_list);
63
64 struct suspend_stats suspend_stats;
65 static DEFINE_MUTEX(dpm_list_mtx);
66 static pm_message_t pm_transition;
67
68 static int async_error;
69
70 static const char *pm_verb(int event)
71 {
72         switch (event) {
73         case PM_EVENT_SUSPEND:
74                 return "suspend";
75         case PM_EVENT_RESUME:
76                 return "resume";
77         case PM_EVENT_FREEZE:
78                 return "freeze";
79         case PM_EVENT_QUIESCE:
80                 return "quiesce";
81         case PM_EVENT_HIBERNATE:
82                 return "hibernate";
83         case PM_EVENT_THAW:
84                 return "thaw";
85         case PM_EVENT_RESTORE:
86                 return "restore";
87         case PM_EVENT_RECOVER:
88                 return "recover";
89         default:
90                 return "(unknown PM event)";
91         }
92 }
93
94 /**
95  * device_pm_sleep_init - Initialize system suspend-related device fields.
96  * @dev: Device object being initialized.
97  */
98 void device_pm_sleep_init(struct device *dev)
99 {
100         dev->power.is_prepared = false;
101         dev->power.is_suspended = false;
102         dev->power.is_noirq_suspended = false;
103         dev->power.is_late_suspended = false;
104         init_completion(&dev->power.completion);
105         complete_all(&dev->power.completion);
106         dev->power.wakeup = NULL;
107         INIT_LIST_HEAD(&dev->power.entry);
108 }
109
110 /**
111  * device_pm_lock - Lock the list of active devices used by the PM core.
112  */
113 void device_pm_lock(void)
114 {
115         mutex_lock(&dpm_list_mtx);
116 }
117
118 /**
119  * device_pm_unlock - Unlock the list of active devices used by the PM core.
120  */
121 void device_pm_unlock(void)
122 {
123         mutex_unlock(&dpm_list_mtx);
124 }
125
126 /**
127  * device_pm_add - Add a device to the PM core's list of active devices.
128  * @dev: Device to add to the list.
129  */
130 void device_pm_add(struct device *dev)
131 {
132         /* Skip PM setup/initialization. */
133         if (device_pm_not_required(dev))
134                 return;
135
136         pr_debug("Adding info for %s:%s\n",
137                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
138         device_pm_check_callbacks(dev);
139         mutex_lock(&dpm_list_mtx);
140         if (dev->parent && dev->parent->power.is_prepared)
141                 dev_warn(dev, "parent %s should not be sleeping\n",
142                         dev_name(dev->parent));
143         list_add_tail(&dev->power.entry, &dpm_list);
144         dev->power.in_dpm_list = true;
145         mutex_unlock(&dpm_list_mtx);
146 }
147
148 /**
149  * device_pm_remove - Remove a device from the PM core's list of active devices.
150  * @dev: Device to be removed from the list.
151  */
152 void device_pm_remove(struct device *dev)
153 {
154         if (device_pm_not_required(dev))
155                 return;
156
157         pr_debug("Removing info for %s:%s\n",
158                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
159         complete_all(&dev->power.completion);
160         mutex_lock(&dpm_list_mtx);
161         list_del_init(&dev->power.entry);
162         dev->power.in_dpm_list = false;
163         mutex_unlock(&dpm_list_mtx);
164         device_wakeup_disable(dev);
165         pm_runtime_remove(dev);
166         device_pm_check_callbacks(dev);
167 }
168
169 /**
170  * device_pm_move_before - Move device in the PM core's list of active devices.
171  * @deva: Device to move in dpm_list.
172  * @devb: Device @deva should come before.
173  */
174 void device_pm_move_before(struct device *deva, struct device *devb)
175 {
176         pr_debug("Moving %s:%s before %s:%s\n",
177                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
178                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
179         /* Delete deva from dpm_list and reinsert before devb. */
180         list_move_tail(&deva->power.entry, &devb->power.entry);
181 }
182
183 /**
184  * device_pm_move_after - Move device in the PM core's list of active devices.
185  * @deva: Device to move in dpm_list.
186  * @devb: Device @deva should come after.
187  */
188 void device_pm_move_after(struct device *deva, struct device *devb)
189 {
190         pr_debug("Moving %s:%s after %s:%s\n",
191                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
192                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
193         /* Delete deva from dpm_list and reinsert after devb. */
194         list_move(&deva->power.entry, &devb->power.entry);
195 }
196
197 /**
198  * device_pm_move_last - Move device to end of the PM core's list of devices.
199  * @dev: Device to move in dpm_list.
200  */
201 void device_pm_move_last(struct device *dev)
202 {
203         pr_debug("Moving %s:%s to end of list\n",
204                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
205         list_move_tail(&dev->power.entry, &dpm_list);
206 }
207
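/*
 * When initcall debugging (pm_print_times) is enabled, log the PM callback
 * about to be invoked for @dev and return the current time so that
 * initcall_debug_report() can compute its duration; return 0 otherwise.
 */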
208 static ktime_t initcall_debug_start(struct device *dev, void *cb)
209 {
210         if (!pm_print_times_enabled)
211                 return 0;
212
213         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
214                  task_pid_nr(current),
215                  dev->parent ? dev_name(dev->parent) : "none");
216         return ktime_get();
217 }
218
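/*
 * Counterpart of initcall_debug_start(): log the return value of the PM
 * callback and how long it took, if initcall debugging is enabled.
 */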
219 static void initcall_debug_report(struct device *dev, ktime_t calltime,
220                                   void *cb, int error)
221 {
222         ktime_t rettime;
223
224         if (!pm_print_times_enabled)
225                 return;
226
227         rettime = ktime_get();
228         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
229                  (unsigned long long)ktime_us_delta(rettime, calltime));
230 }
231
232 /**
233  * dpm_wait - Wait for a PM operation to complete.
234  * @dev: Device to wait for.
235  * @async: If unset, wait only if the device's power.async_suspend flag is set.
236  */
237 static void dpm_wait(struct device *dev, bool async)
238 {
239         if (!dev)
240                 return;
241
242         if (async || (pm_async_enabled && dev->power.async_suspend))
243                 wait_for_completion(&dev->power.completion);
244 }
245
246 static int dpm_wait_fn(struct device *dev, void *async_ptr)
247 {
248         dpm_wait(dev, *((bool *)async_ptr));
249         return 0;
250 }
251
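/* Apply dpm_wait() to every child of @dev. */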
252 static void dpm_wait_for_children(struct device *dev, bool async)
253 {
254         device_for_each_child(dev, &async, dpm_wait_fn);
255 }
256
257 static void dpm_wait_for_suppliers(struct device *dev, bool async)
258 {
259         struct device_link *link;
260         int idx;
261
262         idx = device_links_read_lock();
263
264         /*
265          * If the supplier goes away right after we've checked the link to it,
266          * we'll wait for its completion to change the state, but that's fine,
267          * because the only things that will block as a result are the SRCU
268          * callbacks freeing the link objects for the links in the list we're
269          * walking.
270          */
271         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
272                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
273                         dpm_wait(link->supplier, async);
274
275         device_links_read_unlock(idx);
276 }
277
278 static bool dpm_wait_for_superior(struct device *dev, bool async)
279 {
280         struct device *parent;
281
282         /*
283          * If the device is resumed asynchronously and the parent's callback
284          * deletes both the device and the parent itself, the parent object may
285          * be freed while this function is running, so avoid that by reference
286          * counting the parent once more unless the device has been deleted
287          * already (in which case return right away).
288          */
289         mutex_lock(&dpm_list_mtx);
290
291         if (!device_pm_initialized(dev)) {
292                 mutex_unlock(&dpm_list_mtx);
293                 return false;
294         }
295
296         parent = get_device(dev->parent);
297
298         mutex_unlock(&dpm_list_mtx);
299
300         dpm_wait(parent, async);
301         put_device(parent);
302
303         dpm_wait_for_suppliers(dev, async);
304
305         /*
306          * If the parent's callback has deleted the device, attempting to resume
307          * it would be invalid, so avoid doing that then.
308          */
309         return device_pm_initialized(dev);
310 }
311
312 static void dpm_wait_for_consumers(struct device *dev, bool async)
313 {
314         struct device_link *link;
315         int idx;
316
317         idx = device_links_read_lock();
318
319         /*
320          * The status of a device link can only be changed from "dormant" by a
321          * probe, but that cannot happen during system suspend/resume.  In
322          * theory it can change to "dormant" at that time, but then it is
323          * reasonable to wait for the target device anyway (eg. if it goes
324          * away, it's better to wait for it to go away completely and then
325          * continue instead of trying to continue in parallel with its
326          * unregistration).
327          */
328         list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
329                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
330                         dpm_wait(link->consumer, async);
331
332         device_links_read_unlock(idx);
333 }
334
335 static void dpm_wait_for_subordinate(struct device *dev, bool async)
336 {
337         dpm_wait_for_children(dev, async);
338         dpm_wait_for_consumers(dev, async);
339 }
340
341 /**
342  * pm_op - Return the PM operation appropriate for given PM event.
343  * @ops: PM operations to choose from.
344  * @state: PM transition of the system being carried out.
345  */
346 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
347 {
348         switch (state.event) {
349 #ifdef CONFIG_SUSPEND
350         case PM_EVENT_SUSPEND:
351                 return ops->suspend;
352         case PM_EVENT_RESUME:
353                 return ops->resume;
354 #endif /* CONFIG_SUSPEND */
355 #ifdef CONFIG_HIBERNATE_CALLBACKS
356         case PM_EVENT_FREEZE:
357         case PM_EVENT_QUIESCE:
358                 return ops->freeze;
359         case PM_EVENT_HIBERNATE:
360                 return ops->poweroff;
361         case PM_EVENT_THAW:
362         case PM_EVENT_RECOVER:
363                 return ops->thaw;
364         case PM_EVENT_RESTORE:
365                 return ops->restore;
366 #endif /* CONFIG_HIBERNATE_CALLBACKS */
367         }
368
369         return NULL;
370 }
371
372 /**
373  * pm_late_early_op - Return the PM operation appropriate for given PM event.
374  * @ops: PM operations to choose from.
375  * @state: PM transition of the system being carried out.
376  *
377  * The returned callback runs with runtime PM disabled for the target device.
378  */
379 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
380                                       pm_message_t state)
381 {
382         switch (state.event) {
383 #ifdef CONFIG_SUSPEND
384         case PM_EVENT_SUSPEND:
385                 return ops->suspend_late;
386         case PM_EVENT_RESUME:
387                 return ops->resume_early;
388 #endif /* CONFIG_SUSPEND */
389 #ifdef CONFIG_HIBERNATE_CALLBACKS
390         case PM_EVENT_FREEZE:
391         case PM_EVENT_QUIESCE:
392                 return ops->freeze_late;
393         case PM_EVENT_HIBERNATE:
394                 return ops->poweroff_late;
395         case PM_EVENT_THAW:
396         case PM_EVENT_RECOVER:
397                 return ops->thaw_early;
398         case PM_EVENT_RESTORE:
399                 return ops->restore_early;
400 #endif /* CONFIG_HIBERNATE_CALLBACKS */
401         }
402
403         return NULL;
404 }
405
406 /**
407  * pm_noirq_op - Return the PM operation appropriate for given PM event.
408  * @ops: PM operations to choose from.
409  * @state: PM transition of the system being carried out.
410  *
411  * The driver of the target device will not receive interrupts while the
412  * returned callback is being executed.
413  */
414 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
415 {
416         switch (state.event) {
417 #ifdef CONFIG_SUSPEND
418         case PM_EVENT_SUSPEND:
419                 return ops->suspend_noirq;
420         case PM_EVENT_RESUME:
421                 return ops->resume_noirq;
422 #endif /* CONFIG_SUSPEND */
423 #ifdef CONFIG_HIBERNATE_CALLBACKS
424         case PM_EVENT_FREEZE:
425         case PM_EVENT_QUIESCE:
426                 return ops->freeze_noirq;
427         case PM_EVENT_HIBERNATE:
428                 return ops->poweroff_noirq;
429         case PM_EVENT_THAW:
430         case PM_EVENT_RECOVER:
431                 return ops->thaw_noirq;
432         case PM_EVENT_RESTORE:
433                 return ops->restore_noirq;
434 #endif /* CONFIG_HIBERNATE_CALLBACKS */
435         }
436
437         return NULL;
438 }
439
440 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
441 {
442         dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
443                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
444                 ", may wakeup" : "", dev->power.driver_flags);
445 }
446
447 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
448                         int error)
449 {
450         dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
451                 error);
452 }
453
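/* Emit a debug message with the duration of a suspend/resume phase. */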
454 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
455                           const char *info)
456 {
457         ktime_t calltime;
458         u64 usecs64;
459         int usecs;
460
461         calltime = ktime_get();
462         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
463         do_div(usecs64, NSEC_PER_USEC);
464         usecs = usecs64;
465         if (usecs == 0)
466                 usecs = 1;
467
468         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
469                   info ?: "", info ? " " : "", pm_verb(state.event),
470                   error ? "aborted" : "complete",
471                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
472 }
473
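/*
 * Invoke a PM callback for @dev with tracing, optional initcall-debug timing
 * and error reporting.  A NULL callback is treated as success.
 */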
474 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
475                             pm_message_t state, const char *info)
476 {
477         ktime_t calltime;
478         int error;
479
480         if (!cb)
481                 return 0;
482
483         calltime = initcall_debug_start(dev, cb);
484
485         pm_dev_dbg(dev, state, info);
486         trace_device_pm_callback_start(dev, info, state.event);
487         error = cb(dev);
488         trace_device_pm_callback_end(dev, error);
489         suspend_report_result(cb, error);
490
491         initcall_debug_report(dev, calltime, cb, error);
492
493         return error;
494 }
495
496 #ifdef CONFIG_DPM_WATCHDOG
497 struct dpm_watchdog {
498         struct device           *dev;
499         struct task_struct      *tsk;
500         struct timer_list       timer;
501 };
502
503 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
504         struct dpm_watchdog wd
505
506 /**
507  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
508  * @t: The timer that PM watchdog depends on.
509  *
510  * Called when a driver has timed out suspending or resuming.
511  * There's not much we can do here to recover so panic() to
512  * capture a crash-dump in pstore.
513  */
514 static void dpm_watchdog_handler(struct timer_list *t)
515 {
516         struct dpm_watchdog *wd = from_timer(wd, t, timer);
517
518         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
519         show_stack(wd->tsk, NULL, KERN_EMERG);
520         panic("%s %s: unrecoverable failure\n",
521                 dev_driver_string(wd->dev), dev_name(wd->dev));
522 }
523
524 /**
525  * dpm_watchdog_set - Enable pm watchdog for given device.
526  * @wd: Watchdog. Must be allocated on the stack.
527  * @dev: Device to handle.
528  */
529 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
530 {
531         struct timer_list *timer = &wd->timer;
532
533         wd->dev = dev;
534         wd->tsk = current;
535
536         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
537         /* use same timeout value for both suspend and resume */
538         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
539         add_timer(timer);
540 }
541
542 /**
543  * dpm_watchdog_clear - Disable suspend/resume watchdog.
544  * @wd: Watchdog to disable.
545  */
546 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
547 {
548         struct timer_list *timer = &wd->timer;
549
550         del_timer_sync(timer);
551         destroy_timer_on_stack(timer);
552 }
553 #else
554 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
555 #define dpm_watchdog_set(x, y)
556 #define dpm_watchdog_clear(x)
557 #endif
558
559 /*------------------------- Resume routines -------------------------*/
560
561 /**
562  * dev_pm_skip_resume - System-wide device resume optimization check.
563  * @dev: Target device.
564  *
565  * Return:
566  * - %false if the transition under way is RESTORE.
567  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
568  * - The logical negation of %power.must_resume otherwise (that is, when the
569  *   transition under way is RESUME).
570  */
571 bool dev_pm_skip_resume(struct device *dev)
572 {
573         if (pm_transition.event == PM_EVENT_RESTORE)
574                 return false;
575
576         if (pm_transition.event == PM_EVENT_THAW)
577                 return dev_pm_skip_suspend(dev);
578
579         return !dev->power.must_resume;
580 }
581
582 /**
583  * device_resume_noirq - Execute a "noirq resume" callback for given device.
584  * @dev: Device to handle.
585  * @state: PM transition of the system being carried out.
586  * @async: If true, the device is being resumed asynchronously.
587  *
588  * The driver of @dev will not receive interrupts while this function is being
589  * executed.
590  */
591 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
592 {
593         pm_callback_t callback = NULL;
594         const char *info = NULL;
595         bool skip_resume;
596         int error = 0;
597
598         TRACE_DEVICE(dev);
599         TRACE_RESUME(0);
600
601         if (dev->power.syscore || dev->power.direct_complete)
602                 goto Out;
603
604         if (!dev->power.is_noirq_suspended)
605                 goto Out;
606
607         if (!dpm_wait_for_superior(dev, async))
608                 goto Out;
609
610         skip_resume = dev_pm_skip_resume(dev);
611         /*
612          * If the driver callback is skipped below or by the middle layer
613          * callback and device_resume_early() also skips the driver callback for
614          * this device later, it needs to appear as "suspended" to PM-runtime,
615          * so change its status accordingly.
616          *
617          * Otherwise, the device is going to be resumed, so set its PM-runtime
618          * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
619          * to avoid confusing drivers that don't use it.
620          */
621         if (skip_resume)
622                 pm_runtime_set_suspended(dev);
623         else if (dev_pm_skip_suspend(dev))
624                 pm_runtime_set_active(dev);
625
626         if (dev->pm_domain) {
627                 info = "noirq power domain ";
628                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
629         } else if (dev->type && dev->type->pm) {
630                 info = "noirq type ";
631                 callback = pm_noirq_op(dev->type->pm, state);
632         } else if (dev->class && dev->class->pm) {
633                 info = "noirq class ";
634                 callback = pm_noirq_op(dev->class->pm, state);
635         } else if (dev->bus && dev->bus->pm) {
636                 info = "noirq bus ";
637                 callback = pm_noirq_op(dev->bus->pm, state);
638         }
639         if (callback)
640                 goto Run;
641
642         if (skip_resume)
643                 goto Skip;
644
645         if (dev->driver && dev->driver->pm) {
646                 info = "noirq driver ";
647                 callback = pm_noirq_op(dev->driver->pm, state);
648         }
649
650 Run:
651         error = dpm_run_callback(callback, dev, state, info);
652
653 Skip:
654         dev->power.is_noirq_suspended = false;
655
656 Out:
657         complete_all(&dev->power.completion);
658         TRACE_RESUME(error);
659         return error;
660 }
661
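/*
 * Return true if @dev is to be handled asynchronously: its async_suspend flag
 * is set, asynchronous PM is enabled and PM tracing is not in use.
 */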
662 static bool is_async(struct device *dev)
663 {
664         return dev->power.async_suspend && pm_async_enabled
665                 && !pm_trace_is_enabled();
666 }
667
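/*
 * Reinitialize the PM completion of @dev and, if it is to be handled
 * asynchronously, take a reference on it and schedule @func for it.  Return
 * true if @func has been scheduled, or false if the caller needs to handle
 * the device synchronously.
 */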
668 static bool dpm_async_fn(struct device *dev, async_func_t func)
669 {
670         reinit_completion(&dev->power.completion);
671
672         if (is_async(dev)) {
673                 get_device(dev);
674                 async_schedule_dev(func, dev);
675                 return true;
676         }
677
678         return false;
679 }
680
681 static void async_resume_noirq(void *data, async_cookie_t cookie)
682 {
683         struct device *dev = (struct device *)data;
684         int error;
685
686         error = device_resume_noirq(dev, pm_transition, true);
687         if (error)
688                 pm_dev_err(dev, pm_transition, " async", error);
689
690         put_device(dev);
691 }
692
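/*
 * Run the "noirq" resume callbacks for all devices in dpm_noirq_list, moving
 * each device to dpm_late_early_list as it is handled.
 */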
693 static void dpm_noirq_resume_devices(pm_message_t state)
694 {
695         struct device *dev;
696         ktime_t starttime = ktime_get();
697
698         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
699         mutex_lock(&dpm_list_mtx);
700         pm_transition = state;
701
702         /*
703          * Start the async threads upfront, in case their execution
704          * would otherwise be delayed by devices that are resumed
705          * synchronously.
706          */
707         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
708                 dpm_async_fn(dev, async_resume_noirq);
709
710         while (!list_empty(&dpm_noirq_list)) {
711                 dev = to_device(dpm_noirq_list.next);
712                 get_device(dev);
713                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
714
715                 mutex_unlock(&dpm_list_mtx);
716
717                 if (!is_async(dev)) {
718                         int error;
719
720                         error = device_resume_noirq(dev, state, false);
721                         if (error) {
722                                 suspend_stats.failed_resume_noirq++;
723                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
724                                 dpm_save_failed_dev(dev_name(dev));
725                                 pm_dev_err(dev, state, " noirq", error);
726                         }
727                 }
728
729                 put_device(dev);
730
731                 mutex_lock(&dpm_list_mtx);
732         }
733         mutex_unlock(&dpm_list_mtx);
734         async_synchronize_full();
735         dpm_show_time(starttime, state, 0, "noirq");
736         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
737 }
738
739 /**
740  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
741  * @state: PM transition of the system being carried out.
742  *
743  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
744  * allow device drivers' interrupt handlers to be called.
745  */
746 void dpm_resume_noirq(pm_message_t state)
747 {
748         dpm_noirq_resume_devices(state);
749
750         resume_device_irqs();
751         device_wakeup_disarm_wake_irqs();
752
753         cpuidle_resume();
754 }
755
756 /**
757  * device_resume_early - Execute an "early resume" callback for given device.
758  * @dev: Device to handle.
759  * @state: PM transition of the system being carried out.
760  * @async: If true, the device is being resumed asynchronously.
761  *
762  * Runtime PM is disabled for @dev while this function is being executed.
763  */
764 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
765 {
766         pm_callback_t callback = NULL;
767         const char *info = NULL;
768         int error = 0;
769
770         TRACE_DEVICE(dev);
771         TRACE_RESUME(0);
772
773         if (dev->power.syscore || dev->power.direct_complete)
774                 goto Out;
775
776         if (!dev->power.is_late_suspended)
777                 goto Out;
778
779         if (!dpm_wait_for_superior(dev, async))
780                 goto Out;
781
782         if (dev->pm_domain) {
783                 info = "early power domain ";
784                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
785         } else if (dev->type && dev->type->pm) {
786                 info = "early type ";
787                 callback = pm_late_early_op(dev->type->pm, state);
788         } else if (dev->class && dev->class->pm) {
789                 info = "early class ";
790                 callback = pm_late_early_op(dev->class->pm, state);
791         } else if (dev->bus && dev->bus->pm) {
792                 info = "early bus ";
793                 callback = pm_late_early_op(dev->bus->pm, state);
794         }
795         if (callback)
796                 goto Run;
797
798         if (dev_pm_skip_resume(dev))
799                 goto Skip;
800
801         if (dev->driver && dev->driver->pm) {
802                 info = "early driver ";
803                 callback = pm_late_early_op(dev->driver->pm, state);
804         }
805
806 Run:
807         error = dpm_run_callback(callback, dev, state, info);
808
809 Skip:
810         dev->power.is_late_suspended = false;
811
812 Out:
813         TRACE_RESUME(error);
814
815         pm_runtime_enable(dev);
816         complete_all(&dev->power.completion);
817         return error;
818 }
819
820 static void async_resume_early(void *data, async_cookie_t cookie)
821 {
822         struct device *dev = (struct device *)data;
823         int error;
824
825         error = device_resume_early(dev, pm_transition, true);
826         if (error)
827                 pm_dev_err(dev, pm_transition, " async", error);
828
829         put_device(dev);
830 }
831
832 /**
833  * dpm_resume_early - Execute "early resume" callbacks for all devices.
834  * @state: PM transition of the system being carried out.
835  */
836 void dpm_resume_early(pm_message_t state)
837 {
838         struct device *dev;
839         ktime_t starttime = ktime_get();
840
841         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
842         mutex_lock(&dpm_list_mtx);
843         pm_transition = state;
844
845         /*
846          * Start the async threads upfront, in case their execution
847          * would otherwise be delayed by devices that are resumed
848          * synchronously.
849          */
850         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
851                 dpm_async_fn(dev, async_resume_early);
852
853         while (!list_empty(&dpm_late_early_list)) {
854                 dev = to_device(dpm_late_early_list.next);
855                 get_device(dev);
856                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
857
858                 mutex_unlock(&dpm_list_mtx);
859
860                 if (!is_async(dev)) {
861                         int error;
862
863                         error = device_resume_early(dev, state, false);
864                         if (error) {
865                                 suspend_stats.failed_resume_early++;
866                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
867                                 dpm_save_failed_dev(dev_name(dev));
868                                 pm_dev_err(dev, state, " early", error);
869                         }
870                 }
871
872                 put_device(dev);
873
874                 mutex_lock(&dpm_list_mtx);
875         }
876         mutex_unlock(&dpm_list_mtx);
877         async_synchronize_full();
878         dpm_show_time(starttime, state, 0, "early");
879         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
880 }
881
882 /**
883  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
884  * @state: PM transition of the system being carried out.
885  */
886 void dpm_resume_start(pm_message_t state)
887 {
888         dpm_resume_noirq(state);
889         dpm_resume_early(state);
890 }
891 EXPORT_SYMBOL_GPL(dpm_resume_start);
892
893 /**
894  * device_resume - Execute "resume" callbacks for given device.
895  * @dev: Device to handle.
896  * @state: PM transition of the system being carried out.
897  * @async: If true, the device is being resumed asynchronously.
898  */
899 static int device_resume(struct device *dev, pm_message_t state, bool async)
900 {
901         pm_callback_t callback = NULL;
902         const char *info = NULL;
903         int error = 0;
904         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
905
906         TRACE_DEVICE(dev);
907         TRACE_RESUME(0);
908
909         if (dev->power.syscore)
910                 goto Complete;
911
912         if (dev->power.direct_complete) {
913                 /* Match the pm_runtime_disable() in __device_suspend(). */
914                 pm_runtime_enable(dev);
915                 goto Complete;
916         }
917
918         if (!dpm_wait_for_superior(dev, async))
919                 goto Complete;
920
921         dpm_watchdog_set(&wd, dev);
922         device_lock(dev);
923
924         /*
925          * This is a fib.  But we'll allow new children to be added below
926          * a resumed device, even if the device hasn't been completed yet.
927          */
928         dev->power.is_prepared = false;
929
930         if (!dev->power.is_suspended)
931                 goto Unlock;
932
933         if (dev->pm_domain) {
934                 info = "power domain ";
935                 callback = pm_op(&dev->pm_domain->ops, state);
936                 goto Driver;
937         }
938
939         if (dev->type && dev->type->pm) {
940                 info = "type ";
941                 callback = pm_op(dev->type->pm, state);
942                 goto Driver;
943         }
944
945         if (dev->class && dev->class->pm) {
946                 info = "class ";
947                 callback = pm_op(dev->class->pm, state);
948                 goto Driver;
949         }
950
951         if (dev->bus) {
952                 if (dev->bus->pm) {
953                         info = "bus ";
954                         callback = pm_op(dev->bus->pm, state);
955                 } else if (dev->bus->resume) {
956                         info = "legacy bus ";
957                         callback = dev->bus->resume;
958                         goto End;
959                 }
960         }
961
962  Driver:
963         if (!callback && dev->driver && dev->driver->pm) {
964                 info = "driver ";
965                 callback = pm_op(dev->driver->pm, state);
966         }
967
968  End:
969         error = dpm_run_callback(callback, dev, state, info);
970         dev->power.is_suspended = false;
971
972  Unlock:
973         device_unlock(dev);
974         dpm_watchdog_clear(&wd);
975
976  Complete:
977         complete_all(&dev->power.completion);
978
979         TRACE_RESUME(error);
980
981         return error;
982 }
983
984 static void async_resume(void *data, async_cookie_t cookie)
985 {
986         struct device *dev = (struct device *)data;
987         int error;
988
989         error = device_resume(dev, pm_transition, true);
990         if (error)
991                 pm_dev_err(dev, pm_transition, " async", error);
992         put_device(dev);
993 }
994
995 /**
996  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
997  * @state: PM transition of the system being carried out.
998  *
999  * Execute the appropriate "resume" callback for all devices whose status
1000  * indicates that they are suspended.
1001  */
1002 void dpm_resume(pm_message_t state)
1003 {
1004         struct device *dev;
1005         ktime_t starttime = ktime_get();
1006
1007         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1008         might_sleep();
1009
1010         mutex_lock(&dpm_list_mtx);
1011         pm_transition = state;
1012         async_error = 0;
1013
1014         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1015                 dpm_async_fn(dev, async_resume);
1016
1017         while (!list_empty(&dpm_suspended_list)) {
1018                 dev = to_device(dpm_suspended_list.next);
1019                 get_device(dev);
1020                 if (!is_async(dev)) {
1021                         int error;
1022
1023                         mutex_unlock(&dpm_list_mtx);
1024
1025                         error = device_resume(dev, state, false);
1026                         if (error) {
1027                                 suspend_stats.failed_resume++;
1028                                 dpm_save_failed_step(SUSPEND_RESUME);
1029                                 dpm_save_failed_dev(dev_name(dev));
1030                                 pm_dev_err(dev, state, "", error);
1031                         }
1032
1033                         mutex_lock(&dpm_list_mtx);
1034                 }
1035                 if (!list_empty(&dev->power.entry))
1036                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1037
1038                 mutex_unlock(&dpm_list_mtx);
1039
1040                 put_device(dev);
1041
1042                 mutex_lock(&dpm_list_mtx);
1043         }
1044         mutex_unlock(&dpm_list_mtx);
1045         async_synchronize_full();
1046         dpm_show_time(starttime, state, 0, NULL);
1047
1048         cpufreq_resume();
1049         devfreq_resume();
1050         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1051 }
1052
1053 /**
1054  * device_complete - Complete a PM transition for given device.
1055  * @dev: Device to handle.
1056  * @state: PM transition of the system being carried out.
1057  */
1058 static void device_complete(struct device *dev, pm_message_t state)
1059 {
1060         void (*callback)(struct device *) = NULL;
1061         const char *info = NULL;
1062
1063         if (dev->power.syscore)
1064                 goto out;
1065
1066         device_lock(dev);
1067
1068         if (dev->pm_domain) {
1069                 info = "completing power domain ";
1070                 callback = dev->pm_domain->ops.complete;
1071         } else if (dev->type && dev->type->pm) {
1072                 info = "completing type ";
1073                 callback = dev->type->pm->complete;
1074         } else if (dev->class && dev->class->pm) {
1075                 info = "completing class ";
1076                 callback = dev->class->pm->complete;
1077         } else if (dev->bus && dev->bus->pm) {
1078                 info = "completing bus ";
1079                 callback = dev->bus->pm->complete;
1080         }
1081
1082         if (!callback && dev->driver && dev->driver->pm) {
1083                 info = "completing driver ";
1084                 callback = dev->driver->pm->complete;
1085         }
1086
1087         if (callback) {
1088                 pm_dev_dbg(dev, state, info);
1089                 callback(dev);
1090         }
1091
1092         device_unlock(dev);
1093
1094 out:
1095         pm_runtime_put(dev);
1096 }
1097
1098 /**
1099  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1100  * @state: PM transition of the system being carried out.
1101  *
1102  * Execute the ->complete() callbacks for all devices whose PM status is not
1103  * DPM_ON (this allows new devices to be registered).
1104  */
1105 void dpm_complete(pm_message_t state)
1106 {
1107         struct list_head list;
1108
1109         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1110         might_sleep();
1111
1112         INIT_LIST_HEAD(&list);
1113         mutex_lock(&dpm_list_mtx);
1114         while (!list_empty(&dpm_prepared_list)) {
1115                 struct device *dev = to_device(dpm_prepared_list.prev);
1116
1117                 get_device(dev);
1118                 dev->power.is_prepared = false;
1119                 list_move(&dev->power.entry, &list);
1120
1121                 mutex_unlock(&dpm_list_mtx);
1122
1123                 trace_device_pm_callback_start(dev, "", state.event);
1124                 device_complete(dev, state);
1125                 trace_device_pm_callback_end(dev, 0);
1126
1127                 put_device(dev);
1128
1129                 mutex_lock(&dpm_list_mtx);
1130         }
1131         list_splice(&list, &dpm_list);
1132         mutex_unlock(&dpm_list_mtx);
1133
1134         /* Allow device probing and trigger re-probing of deferred devices */
1135         device_unblock_probing();
1136         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1137 }
1138
1139 /**
1140  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1141  * @state: PM transition of the system being carried out.
1142  *
1143  * Execute "resume" callbacks for all devices and complete the PM transition of
1144  * the system.
1145  */
1146 void dpm_resume_end(pm_message_t state)
1147 {
1148         dpm_resume(state);
1149         dpm_complete(state);
1150 }
1151 EXPORT_SYMBOL_GPL(dpm_resume_end);
1152
1153
1154 /*------------------------- Suspend routines -------------------------*/
1155
1156 /**
1157  * resume_event - Return a "resume" message for given "suspend" sleep state.
1158  * @sleep_state: PM message representing a sleep state.
1159  *
1160  * Return a PM message representing the resume event corresponding to given
1161  * sleep state.
1162  */
1163 static pm_message_t resume_event(pm_message_t sleep_state)
1164 {
1165         switch (sleep_state.event) {
1166         case PM_EVENT_SUSPEND:
1167                 return PMSG_RESUME;
1168         case PM_EVENT_FREEZE:
1169         case PM_EVENT_QUIESCE:
1170                 return PMSG_RECOVER;
1171         case PM_EVENT_HIBERNATE:
1172                 return PMSG_RESTORE;
1173         }
1174         return PMSG_ON;
1175 }
1176
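/*
 * Propagate the must_resume flag to the parent and suppliers of @dev so that
 * they will be resumed too during the subsequent system resume.
 */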
1177 static void dpm_superior_set_must_resume(struct device *dev)
1178 {
1179         struct device_link *link;
1180         int idx;
1181
1182         if (dev->parent)
1183                 dev->parent->power.must_resume = true;
1184
1185         idx = device_links_read_lock();
1186
1187         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1188                 link->supplier->power.must_resume = true;
1189
1190         device_links_read_unlock(idx);
1191 }
1192
1193 /**
1194  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1195  * @dev: Device to handle.
1196  * @state: PM transition of the system being carried out.
1197  * @async: If true, the device is being suspended asynchronously.
1198  *
1199  * The driver of @dev will not receive interrupts while this function is being
1200  * executed.
1201  */
1202 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1203 {
1204         pm_callback_t callback = NULL;
1205         const char *info = NULL;
1206         int error = 0;
1207
1208         TRACE_DEVICE(dev);
1209         TRACE_SUSPEND(0);
1210
1211         dpm_wait_for_subordinate(dev, async);
1212
1213         if (async_error)
1214                 goto Complete;
1215
1216         if (dev->power.syscore || dev->power.direct_complete)
1217                 goto Complete;
1218
1219         if (dev->pm_domain) {
1220                 info = "noirq power domain ";
1221                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1222         } else if (dev->type && dev->type->pm) {
1223                 info = "noirq type ";
1224                 callback = pm_noirq_op(dev->type->pm, state);
1225         } else if (dev->class && dev->class->pm) {
1226                 info = "noirq class ";
1227                 callback = pm_noirq_op(dev->class->pm, state);
1228         } else if (dev->bus && dev->bus->pm) {
1229                 info = "noirq bus ";
1230                 callback = pm_noirq_op(dev->bus->pm, state);
1231         }
1232         if (callback)
1233                 goto Run;
1234
1235         if (dev_pm_skip_suspend(dev))
1236                 goto Skip;
1237
1238         if (dev->driver && dev->driver->pm) {
1239                 info = "noirq driver ";
1240                 callback = pm_noirq_op(dev->driver->pm, state);
1241         }
1242
1243 Run:
1244         error = dpm_run_callback(callback, dev, state, info);
1245         if (error) {
1246                 async_error = error;
1247                 goto Complete;
1248         }
1249
1250 Skip:
1251         dev->power.is_noirq_suspended = true;
1252
1253         /*
1254          * Skipping the resume of devices that were in use right before the
1255          * system suspend (as indicated by their PM-runtime usage counters)
1256          * would be suboptimal.  Also resume them if their resume is not
1257          * allowed to be skipped.
1258          */
1259         if (atomic_read(&dev->power.usage_count) > 1 ||
1260             !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1261               dev->power.may_skip_resume))
1262                 dev->power.must_resume = true;
1263
1264         if (dev->power.must_resume)
1265                 dpm_superior_set_must_resume(dev);
1266
1267 Complete:
1268         complete_all(&dev->power.completion);
1269         TRACE_SUSPEND(error);
1270         return error;
1271 }
1272
1273 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1274 {
1275         struct device *dev = (struct device *)data;
1276         int error;
1277
1278         error = __device_suspend_noirq(dev, pm_transition, true);
1279         if (error) {
1280                 dpm_save_failed_dev(dev_name(dev));
1281                 pm_dev_err(dev, pm_transition, " async", error);
1282         }
1283
1284         put_device(dev);
1285 }
1286
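/* Suspend @dev in the "noirq" phase, asynchronously if possible. */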
1287 static int device_suspend_noirq(struct device *dev)
1288 {
1289         if (dpm_async_fn(dev, async_suspend_noirq))
1290                 return 0;
1291
1292         return __device_suspend_noirq(dev, pm_transition, false);
1293 }
1294
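/*
 * Run the "noirq" suspend callbacks for all devices in dpm_late_early_list,
 * moving each successfully handled device to dpm_noirq_list, and stop on the
 * first error.
 */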
1295 static int dpm_noirq_suspend_devices(pm_message_t state)
1296 {
1297         ktime_t starttime = ktime_get();
1298         int error = 0;
1299
1300         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1301         mutex_lock(&dpm_list_mtx);
1302         pm_transition = state;
1303         async_error = 0;
1304
1305         while (!list_empty(&dpm_late_early_list)) {
1306                 struct device *dev = to_device(dpm_late_early_list.prev);
1307
1308                 get_device(dev);
1309                 mutex_unlock(&dpm_list_mtx);
1310
1311                 error = device_suspend_noirq(dev);
1312
1313                 mutex_lock(&dpm_list_mtx);
1314
1315                 if (error) {
1316                         pm_dev_err(dev, state, " noirq", error);
1317                         dpm_save_failed_dev(dev_name(dev));
1318                 } else if (!list_empty(&dev->power.entry)) {
1319                         list_move(&dev->power.entry, &dpm_noirq_list);
1320                 }
1321
1322                 mutex_unlock(&dpm_list_mtx);
1323
1324                 put_device(dev);
1325
1326                 mutex_lock(&dpm_list_mtx);
1327
1328                 if (error || async_error)
1329                         break;
1330         }
1331         mutex_unlock(&dpm_list_mtx);
1332         async_synchronize_full();
1333         if (!error)
1334                 error = async_error;
1335
1336         if (error) {
1337                 suspend_stats.failed_suspend_noirq++;
1338                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1339         }
1340         dpm_show_time(starttime, state, error, "noirq");
1341         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1342         return error;
1343 }
1344
1345 /**
1346  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1347  * @state: PM transition of the system being carried out.
1348  *
1349  * Prevent device drivers' interrupt handlers from being called and invoke
1350  * "noirq" suspend callbacks for all non-sysdev devices.
1351  */
1352 int dpm_suspend_noirq(pm_message_t state)
1353 {
1354         int ret;
1355
1356         cpuidle_pause();
1357
1358         device_wakeup_arm_wake_irqs();
1359         suspend_device_irqs();
1360
1361         ret = dpm_noirq_suspend_devices(state);
1362         if (ret)
1363                 dpm_resume_noirq(resume_event(state));
1364
1365         return ret;
1366 }
1367
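/*
 * If @dev is in a wakeup path, mark its parent as being in a wakeup path too,
 * unless the parent ignores its children.
 */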
1368 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1369 {
1370         struct device *parent = dev->parent;
1371
1372         if (!parent)
1373                 return;
1374
1375         spin_lock_irq(&parent->power.lock);
1376
1377         if (device_wakeup_path(dev) && !parent->power.ignore_children)
1378                 parent->power.wakeup_path = true;
1379
1380         spin_unlock_irq(&parent->power.lock);
1381 }
1382
1383 /**
1384  * __device_suspend_late - Execute a "late suspend" callback for given device.
1385  * @dev: Device to handle.
1386  * @state: PM transition of the system being carried out.
1387  * @async: If true, the device is being suspended asynchronously.
1388  *
1389  * Runtime PM is disabled for @dev while this function is being executed.
1390  */
1391 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1392 {
1393         pm_callback_t callback = NULL;
1394         const char *info = NULL;
1395         int error = 0;
1396
1397         TRACE_DEVICE(dev);
1398         TRACE_SUSPEND(0);
1399
1400         __pm_runtime_disable(dev, false);
1401
1402         dpm_wait_for_subordinate(dev, async);
1403
1404         if (async_error)
1405                 goto Complete;
1406
1407         if (pm_wakeup_pending()) {
1408                 async_error = -EBUSY;
1409                 goto Complete;
1410         }
1411
1412         if (dev->power.syscore || dev->power.direct_complete)
1413                 goto Complete;
1414
1415         if (dev->pm_domain) {
1416                 info = "late power domain ";
1417                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1418         } else if (dev->type && dev->type->pm) {
1419                 info = "late type ";
1420                 callback = pm_late_early_op(dev->type->pm, state);
1421         } else if (dev->class && dev->class->pm) {
1422                 info = "late class ";
1423                 callback = pm_late_early_op(dev->class->pm, state);
1424         } else if (dev->bus && dev->bus->pm) {
1425                 info = "late bus ";
1426                 callback = pm_late_early_op(dev->bus->pm, state);
1427         }
1428         if (callback)
1429                 goto Run;
1430
1431         if (dev_pm_skip_suspend(dev))
1432                 goto Skip;
1433
1434         if (dev->driver && dev->driver->pm) {
1435                 info = "late driver ";
1436                 callback = pm_late_early_op(dev->driver->pm, state);
1437         }
1438
1439 Run:
1440         error = dpm_run_callback(callback, dev, state, info);
1441         if (error) {
1442                 async_error = error;
1443                 goto Complete;
1444         }
1445         dpm_propagate_wakeup_to_parent(dev);
1446
1447 Skip:
1448         dev->power.is_late_suspended = true;
1449
1450 Complete:
1451         TRACE_SUSPEND(error);
1452         complete_all(&dev->power.completion);
1453         return error;
1454 }
1455
1456 static void async_suspend_late(void *data, async_cookie_t cookie)
1457 {
1458         struct device *dev = (struct device *)data;
1459         int error;
1460
1461         error = __device_suspend_late(dev, pm_transition, true);
1462         if (error) {
1463                 dpm_save_failed_dev(dev_name(dev));
1464                 pm_dev_err(dev, pm_transition, " async", error);
1465         }
1466         put_device(dev);
1467 }
1468
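/* Suspend @dev in the "late" phase, asynchronously if possible. */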
1469 static int device_suspend_late(struct device *dev)
1470 {
1471         if (dpm_async_fn(dev, async_suspend_late))
1472                 return 0;
1473
1474         return __device_suspend_late(dev, pm_transition, false);
1475 }
1476
1477 /**
1478  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1479  * @state: PM transition of the system being carried out.
1480  */
1481 int dpm_suspend_late(pm_message_t state)
1482 {
1483         ktime_t starttime = ktime_get();
1484         int error = 0;
1485
1486         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1487         mutex_lock(&dpm_list_mtx);
1488         pm_transition = state;
1489         async_error = 0;
1490
1491         while (!list_empty(&dpm_suspended_list)) {
1492                 struct device *dev = to_device(dpm_suspended_list.prev);
1493
1494                 get_device(dev);
1495
1496                 mutex_unlock(&dpm_list_mtx);
1497
1498                 error = device_suspend_late(dev);
1499
1500                 mutex_lock(&dpm_list_mtx);
1501
1502                 if (!list_empty(&dev->power.entry))
1503                         list_move(&dev->power.entry, &dpm_late_early_list);
1504
1505                 if (error) {
1506                         pm_dev_err(dev, state, " late", error);
1507                         dpm_save_failed_dev(dev_name(dev));
1508                 }
1509
1510                 mutex_unlock(&dpm_list_mtx);
1511
1512                 put_device(dev);
1513
1514                 mutex_lock(&dpm_list_mtx);
1515
1516                 if (error || async_error)
1517                         break;
1518         }
1519         mutex_unlock(&dpm_list_mtx);
1520         async_synchronize_full();
1521         if (!error)
1522                 error = async_error;
1523         if (error) {
1524                 suspend_stats.failed_suspend_late++;
1525                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1526                 dpm_resume_early(resume_event(state));
1527         }
1528         dpm_show_time(starttime, state, error, "late");
1529         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1530         return error;
1531 }
1532
1533 /**
1534  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1535  * @state: PM transition of the system being carried out.
1536  */
1537 int dpm_suspend_end(pm_message_t state)
1538 {
1539         ktime_t starttime = ktime_get();
1540         int error;
1541
1542         error = dpm_suspend_late(state);
1543         if (error)
1544                 goto out;
1545
1546         error = dpm_suspend_noirq(state);
1547         if (error)
1548                 dpm_resume_early(resume_event(state));
1549
1550 out:
1551         dpm_show_time(starttime, state, error, "end");
1552         return error;
1553 }
1554 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1555
1556 /**
1557  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1558  * @dev: Device to suspend.
1559  * @state: PM transition of the system being carried out.
1560  * @cb: Suspend callback to execute.
1561  * @info: string description of caller.
1562  */
1563 static int legacy_suspend(struct device *dev, pm_message_t state,
1564                           int (*cb)(struct device *dev, pm_message_t state),
1565                           const char *info)
1566 {
1567         int error;
1568         ktime_t calltime;
1569
1570         calltime = initcall_debug_start(dev, cb);
1571
1572         trace_device_pm_callback_start(dev, info, state.event);
1573         error = cb(dev, state);
1574         trace_device_pm_callback_end(dev, error);
1575         suspend_report_result(cb, error);
1576
1577         initcall_debug_report(dev, calltime, cb, error);
1578
1579         return error;
1580 }
1581
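/*
 * Prevent the parent and suppliers of @dev from using the direct_complete
 * optimization during this transition.
 */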
1582 static void dpm_clear_superiors_direct_complete(struct device *dev)
1583 {
1584         struct device_link *link;
1585         int idx;
1586
1587         if (dev->parent) {
1588                 spin_lock_irq(&dev->parent->power.lock);
1589                 dev->parent->power.direct_complete = false;
1590                 spin_unlock_irq(&dev->parent->power.lock);
1591         }
1592
1593         idx = device_links_read_lock();
1594
1595         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1596                 spin_lock_irq(&link->supplier->power.lock);
1597                 link->supplier->power.direct_complete = false;
1598                 spin_unlock_irq(&link->supplier->power.lock);
1599         }
1600
1601         device_links_read_unlock(idx);
1602 }
1603
1604 /**
1605  * __device_suspend - Execute "suspend" callbacks for given device.
1606  * @dev: Device to handle.
1607  * @state: PM transition of the system being carried out.
1608  * @async: If true, the device is being suspended asynchronously.
1609  */
1610 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1611 {
1612         pm_callback_t callback = NULL;
1613         const char *info = NULL;
1614         int error = 0;
1615         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1616
1617         TRACE_DEVICE(dev);
1618         TRACE_SUSPEND(0);
1619
1620         dpm_wait_for_subordinate(dev, async);
1621
1622         if (async_error) {
1623                 dev->power.direct_complete = false;
1624                 goto Complete;
1625         }
1626
1627         /*
1628          * Wait for possible runtime PM transitions of the device in progress
1629          * to complete and if there's a runtime resume request pending for it,
1630          * resume it before proceeding with invoking the system-wide suspend
1631          * callbacks for it.
1632          *
1633          * If the system-wide suspend callbacks below change the configuration
1634          * of the device, they must disable runtime PM for it or otherwise
1635          * ensure that its runtime-resume callbacks will not be confused by that
1636          * change in case they are invoked going forward.
1637          */
1638         pm_runtime_barrier(dev);
1639
1640         if (pm_wakeup_pending()) {
1641                 dev->power.direct_complete = false;
1642                 async_error = -EBUSY;
1643                 goto Complete;
1644         }
1645
1646         if (dev->power.syscore)
1647                 goto Complete;
1648
1649         /* Avoid direct_complete to let wakeup_path propagate. */
1650         if (device_may_wakeup(dev) || device_wakeup_path(dev))
1651                 dev->power.direct_complete = false;
1652
1653         if (dev->power.direct_complete) {
1654                 if (pm_runtime_status_suspended(dev)) {
1655                         pm_runtime_disable(dev);
1656                         if (pm_runtime_status_suspended(dev)) {
1657                                 pm_dev_dbg(dev, state, "direct-complete ");
1658                                 goto Complete;
1659                         }
1660
1661                         pm_runtime_enable(dev);
1662                 }
1663                 dev->power.direct_complete = false;
1664         }
1665
1666         dev->power.may_skip_resume = true;
1667         dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1668
1669         dpm_watchdog_set(&wd, dev);
1670         device_lock(dev);
1671
1672         if (dev->pm_domain) {
1673                 info = "power domain ";
1674                 callback = pm_op(&dev->pm_domain->ops, state);
1675                 goto Run;
1676         }
1677
1678         if (dev->type && dev->type->pm) {
1679                 info = "type ";
1680                 callback = pm_op(dev->type->pm, state);
1681                 goto Run;
1682         }
1683
1684         if (dev->class && dev->class->pm) {
1685                 info = "class ";
1686                 callback = pm_op(dev->class->pm, state);
1687                 goto Run;
1688         }
1689
1690         if (dev->bus) {
1691                 if (dev->bus->pm) {
1692                         info = "bus ";
1693                         callback = pm_op(dev->bus->pm, state);
1694                 } else if (dev->bus->suspend) {
1695                         pm_dev_dbg(dev, state, "legacy bus ");
1696                         error = legacy_suspend(dev, state, dev->bus->suspend,
1697                                                 "legacy bus ");
1698                         goto End;
1699                 }
1700         }
1701
1702  Run:
1703         if (!callback && dev->driver && dev->driver->pm) {
1704                 info = "driver ";
1705                 callback = pm_op(dev->driver->pm, state);
1706         }
1707
1708         error = dpm_run_callback(callback, dev, state, info);
1709
1710  End:
1711         if (!error) {
1712                 dev->power.is_suspended = true;
1713                 if (device_may_wakeup(dev))
1714                         dev->power.wakeup_path = true;
1715
1716                 dpm_propagate_wakeup_to_parent(dev);
1717                 dpm_clear_superiors_direct_complete(dev);
1718         }
1719
1720         device_unlock(dev);
1721         dpm_watchdog_clear(&wd);
1722
1723  Complete:
1724         if (error)
1725                 async_error = error;
1726
1727         complete_all(&dev->power.completion);
1728         TRACE_SUSPEND(error);
1729         return error;
1730 }
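
/*
 * Illustrative sketch (not part of this file): the lookup above only falls
 * back to a driver-level dev_pm_ops when no PM domain, device type, class or
 * bus callbacks take precedence.  A driver may also set
 * DPM_FLAG_MAY_SKIP_RESUME (or DPM_FLAG_NO_DIRECT_COMPLETE) from its probe
 * path with dev_pm_set_driver_flags() to influence the checks made here.
 * All "foo_*" names are hypothetical.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* put the device into a low-power state */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* bring the device back to full power */
}

static const struct dev_pm_ops foo_pm_ops __maybe_unused = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static int __maybe_unused foo_probe(struct device *dev)
{
	/* Let the core leave the device suspended across resume if possible. */
	dev_pm_set_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}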
1731
1732 static void async_suspend(void *data, async_cookie_t cookie)
1733 {
1734         struct device *dev = (struct device *)data;
1735         int error;
1736
1737         error = __device_suspend(dev, pm_transition, true);
1738         if (error) {
1739                 dpm_save_failed_dev(dev_name(dev));
1740                 pm_dev_err(dev, pm_transition, " async", error);
1741         }
1742
1743         put_device(dev);
1744 }
1745
1746 static int device_suspend(struct device *dev)
1747 {
1748         if (dpm_async_fn(dev, async_suspend))
1749                 return 0;
1750
1751         return __device_suspend(dev, pm_transition, false);
1752 }
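
/*
 * Illustrative sketch (not part of this file): a driver or bus opts a device
 * into the asynchronous path taken by device_suspend() above by enabling
 * async suspend, typically right after the device is registered.
 * "foo_register_done" is a hypothetical name.
 */
static void __maybe_unused foo_register_done(struct device *dev)
{
	/* Suspend/resume this device in parallel with unrelated devices. */
	device_enable_async_suspend(dev);
}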
1753
1754 /**
1755  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1756  * @state: PM transition of the system being carried out.
1757  */
1758 int dpm_suspend(pm_message_t state)
1759 {
1760         ktime_t starttime = ktime_get();
1761         int error = 0;
1762
1763         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1764         might_sleep();
1765
1766         devfreq_suspend();
1767         cpufreq_suspend();
1768
1769         mutex_lock(&dpm_list_mtx);
1770         pm_transition = state;
1771         async_error = 0;
1772         while (!list_empty(&dpm_prepared_list)) {
1773                 struct device *dev = to_device(dpm_prepared_list.prev);
1774
1775                 get_device(dev);
1776
1777                 mutex_unlock(&dpm_list_mtx);
1778
1779                 error = device_suspend(dev);
1780
1781                 mutex_lock(&dpm_list_mtx);
1782
1783                 if (error) {
1784                         pm_dev_err(dev, state, "", error);
1785                         dpm_save_failed_dev(dev_name(dev));
1786                 } else if (!list_empty(&dev->power.entry)) {
1787                         list_move(&dev->power.entry, &dpm_suspended_list);
1788                 }
1789
1790                 mutex_unlock(&dpm_list_mtx);
1791
1792                 put_device(dev);
1793
1794                 mutex_lock(&dpm_list_mtx);
1795
1796                 if (error || async_error)
1797                         break;
1798         }
1799         mutex_unlock(&dpm_list_mtx);
1800         async_synchronize_full();
1801         if (!error)
1802                 error = async_error;
1803         if (error) {
1804                 suspend_stats.failed_suspend++;
1805                 dpm_save_failed_step(SUSPEND_SUSPEND);
1806         }
1807         dpm_show_time(starttime, state, error, NULL);
1808         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1809         return error;
1810 }
1811
1812 /**
1813  * device_prepare - Prepare a device for system power transition.
1814  * @dev: Device to handle.
1815  * @state: PM transition of the system being carried out.
1816  *
1817  * Execute the ->prepare() callback(s) for given device.  No new children of the
1818  * device may be registered after this function has returned.
1819  */
1820 static int device_prepare(struct device *dev, pm_message_t state)
1821 {
1822         int (*callback)(struct device *) = NULL;
1823         int ret = 0;
1824
1825         /*
1826          * If a device's parent goes into runtime suspend at the wrong time,
1827          * it won't be possible to resume the device.  To prevent this we
1828          * block runtime suspend here, during the prepare phase, and allow
1829          * it again during the complete phase.
1830          */
1831         pm_runtime_get_noresume(dev);
1832
1833         if (dev->power.syscore)
1834                 return 0;
1835
1836         device_lock(dev);
1837
1838         dev->power.wakeup_path = false;
1839
1840         if (dev->power.no_pm_callbacks)
1841                 goto unlock;
1842
1843         if (dev->pm_domain)
1844                 callback = dev->pm_domain->ops.prepare;
1845         else if (dev->type && dev->type->pm)
1846                 callback = dev->type->pm->prepare;
1847         else if (dev->class && dev->class->pm)
1848                 callback = dev->class->pm->prepare;
1849         else if (dev->bus && dev->bus->pm)
1850                 callback = dev->bus->pm->prepare;
1851
1852         if (!callback && dev->driver && dev->driver->pm)
1853                 callback = dev->driver->pm->prepare;
1854
1855         if (callback)
1856                 ret = callback(dev);
1857
1858 unlock:
1859         device_unlock(dev);
1860
1861         if (ret < 0) {
1862                 suspend_report_result(callback, ret);
1863                 pm_runtime_put(dev);
1864                 return ret;
1865         }
1866         /*
1867          * A positive return value from ->prepare() means "this device appears
1868          * to be runtime-suspended and its state is fine, so if it really is
1869          * runtime-suspended, you can leave it in that state provided that you
1870          * will do the same thing with all of its descendants".  This only
1871          * applies to suspend transitions, however.
1872          */
1873         spin_lock_irq(&dev->power.lock);
1874         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1875                 (ret > 0 || dev->power.no_pm_callbacks) &&
1876                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1877         spin_unlock_irq(&dev->power.lock);
1878         return 0;
1879 }
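
/*
 * Illustrative sketch (not part of this file): a ->prepare() callback that
 * opts into the direct_complete optimization described above by returning a
 * positive value whenever the device is runtime-suspended and its state needs
 * no further attention.  "foo_prepare" is a hypothetical name.
 */
static int __maybe_unused foo_prepare(struct device *dev)
{
	/* 1: the device may stay runtime-suspended across the transition. */
	return pm_runtime_suspended(dev) ? 1 : 0;
}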
1880
1881 /**
1882  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1883  * @state: PM transition of the system being carried out.
1884  *
1885  * Execute the ->prepare() callback(s) for all devices.
1886  */
1887 int dpm_prepare(pm_message_t state)
1888 {
1889         int error = 0;
1890
1891         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1892         might_sleep();
1893
1894         /*
1895          * Give the already known devices a chance to complete their probes
1896          * before probing is disabled below. This sync point is important at
1897          * least at boot time and during hibernation restore.
1898          */
1899         wait_for_device_probe();
1900         /*
1901          * Probing devices during suspend or hibernation is unsafe and would
1902          * make system behavior unpredictable, so prohibit device probing here
1903          * and defer any new probes instead. Normal behavior will be restored
1904          * in dpm_complete().
1905          */
1906         device_block_probing();
1907
1908         mutex_lock(&dpm_list_mtx);
1909         while (!list_empty(&dpm_list) && !error) {
1910                 struct device *dev = to_device(dpm_list.next);
1911
1912                 get_device(dev);
1913
1914                 mutex_unlock(&dpm_list_mtx);
1915
1916                 trace_device_pm_callback_start(dev, "", state.event);
1917                 error = device_prepare(dev, state);
1918                 trace_device_pm_callback_end(dev, error);
1919
1920                 mutex_lock(&dpm_list_mtx);
1921
1922                 if (!error) {
1923                         dev->power.is_prepared = true;
1924                         if (!list_empty(&dev->power.entry))
1925                                 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1926                 } else if (error == -EAGAIN) {
1927                         error = 0;
1928                 } else {
1929                         dev_info(dev, "not prepared for power transition: code %d\n",
1930                                  error);
1931                 }
1932
1933                 mutex_unlock(&dpm_list_mtx);
1934
1935                 put_device(dev);
1936
1937                 mutex_lock(&dpm_list_mtx);
1938         }
1939         mutex_unlock(&dpm_list_mtx);
1940         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1941         return error;
1942 }
1943
1944 /**
1945  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1946  * @state: PM transition of the system being carried out.
1947  *
1948  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1949  * callbacks for them.
1950  */
1951 int dpm_suspend_start(pm_message_t state)
1952 {
1953         ktime_t starttime = ktime_get();
1954         int error;
1955
1956         error = dpm_prepare(state);
1957         if (error) {
1958                 suspend_stats.failed_prepare++;
1959                 dpm_save_failed_step(SUSPEND_PREPARE);
1960         } else
1961                 error = dpm_suspend(state);
1962         dpm_show_time(starttime, state, error, "start");
1963         return error;
1964 }
1965 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1966
1967 void __suspend_report_result(const char *function, void *fn, int ret)
1968 {
1969         if (ret)
1970                 pr_err("%s(): %pS returns %d\n", function, fn, ret);
1971 }
1972 EXPORT_SYMBOL_GPL(__suspend_report_result);
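
/*
 * Illustrative sketch (not part of this file): middle layers typically wrap
 * their callback invocations with suspend_report_result() so that a failing
 * callback is identified in the log, as done by legacy_suspend() above.
 * "foo_call_suspend" is a hypothetical helper.
 */
static int __maybe_unused foo_call_suspend(struct device *dev,
					   int (*cb)(struct device *))
{
	int error = cb(dev);

	suspend_report_result(cb, error);	/* logs the failing callback, if any */
	return error;
}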
1973
1974 /**
1975  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1976  * @subordinate: Device that needs to wait for @dev.
1977  * @dev: Device to wait for.
1978  */
1979 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1980 {
1981         dpm_wait(dev, subordinate->power.async_suspend);
1982         return async_error;
1983 }
1984 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
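
/*
 * Illustrative sketch (not part of this file): with async suspend/resume
 * enabled, a driver that functionally depends on a device other than its
 * parent can use device_pm_wait_for_dev() to order its own callback after
 * that device's.  "foo_data" and "foo_resume_async" are hypothetical.
 */
struct foo_data {
	struct device *companion;	/* hypothetical functional dependency */
};

static int __maybe_unused foo_resume_async(struct device *dev)
{
	struct foo_data *data = dev_get_drvdata(dev);
	int error;

	/* Do not touch the hardware before the companion has resumed. */
	error = device_pm_wait_for_dev(dev, data->companion);
	if (error)
		return error;

	return 0;
}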
1985
1986 /**
1987  * dpm_for_each_dev - device iterator.
1988  * @data: data for the callback.
1989  * @fn: function to be called for each device.
1990  *
1991  * Iterate over devices in dpm_list, and call @fn for each device,
1992  * passing it @data.
1993  */
1994 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1995 {
1996         struct device *dev;
1997
1998         if (!fn)
1999                 return;
2000
2001         device_pm_lock();
2002         list_for_each_entry(dev, &dpm_list, power.entry)
2003                 fn(dev, data);
2004         device_pm_unlock();
2005 }
2006 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
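
/*
 * Illustrative sketch (not part of this file): callers pass a callback and a
 * cookie, and dpm_for_each_dev() invokes the callback for every device on
 * dpm_list while holding the PM lock.  "foo_count_cb" and "foo_count_devices"
 * are hypothetical names.
 */
static void __maybe_unused foo_count_cb(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;	/* e.g. count the power-managed devices */
}

static unsigned int __maybe_unused foo_count_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, foo_count_cb);
	return count;
}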
2007
2008 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2009 {
2010         if (!ops)
2011                 return true;
2012
2013         return !ops->prepare &&
2014                !ops->suspend &&
2015                !ops->suspend_late &&
2016                !ops->suspend_noirq &&
2017                !ops->resume_noirq &&
2018                !ops->resume_early &&
2019                !ops->resume &&
2020                !ops->complete;
2021 }
2022
2023 void device_pm_check_callbacks(struct device *dev)
2024 {
2025         unsigned long flags;
2026
2027         spin_lock_irqsave(&dev->power.lock, flags);
2028         dev->power.no_pm_callbacks =
2029                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2030                  !dev->bus->suspend && !dev->bus->resume)) &&
2031                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2032                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2033                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2034                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2035                  !dev->driver->suspend && !dev->driver->resume));
2036         spin_unlock_irqrestore(&dev->power.lock, flags);
2037 }
2038
2039 bool dev_pm_skip_suspend(struct device *dev)
2040 {
2041         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2042                 pm_runtime_status_suspended(dev);
2043 }
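
/*
 * Illustrative sketch (not part of this file): a middle layer (a bus type,
 * for example) may use dev_pm_skip_suspend() in its late-suspend callback to
 * leave a device alone when its driver has set DPM_FLAG_SMART_SUSPEND and the
 * device is already runtime-suspended.  "foo_bus_suspend_late" is a
 * hypothetical name.
 */
static int __maybe_unused foo_bus_suspend_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;	/* already suspended, nothing more to do */

	return pm_generic_suspend_late(dev);
}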