PM core: rename suspend and resume functions
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/resume-trace.h>
25 #include <linux/rwsem.h>
26 #include <linux/interrupt.h>
27
28 #include "../base.h"
29 #include "power.h"
30
31 /*
32  * The entries in the dpm_list list are in a depth first order, simply
33  * because children are guaranteed to be discovered after parents, and
34  * are inserted at the back of the list on discovery.
35  *
36  * Since device_pm_add() may be called with a device semaphore held,
37  * we must never try to acquire a device semaphore while holding
38  * dpm_list_mutex.
39  */
40
41 LIST_HEAD(dpm_list);
42
43 static DEFINE_MUTEX(dpm_list_mtx);
44
45 /*
46  * Set once the preparation of devices for a PM transition has started, reset
47  * before starting to resume devices.  Protected by dpm_list_mtx.
48  */
49 static bool transition_started;
50
51 /**
52  *      device_pm_lock - lock the list of active devices used by the PM core
53  */
54 void device_pm_lock(void)
55 {
56         mutex_lock(&dpm_list_mtx);
57 }
58
59 /**
60  *      device_pm_unlock - unlock the list of active devices used by the PM core
61  */
62 void device_pm_unlock(void)
63 {
64         mutex_unlock(&dpm_list_mtx);
65 }
66
67 /**
68  *      device_pm_add - add a device to the list of active devices
69  *      @dev:   Device to be added to the list
70  */
71 void device_pm_add(struct device *dev)
72 {
73         pr_debug("PM: Adding info for %s:%s\n",
74                  dev->bus ? dev->bus->name : "No Bus",
75                  kobject_name(&dev->kobj));
76         mutex_lock(&dpm_list_mtx);
77         if (dev->parent) {
78                 if (dev->parent->power.status >= DPM_SUSPENDING)
79                         dev_warn(dev, "parent %s should not be sleeping\n",
80                                  dev_name(dev->parent));
81         } else if (transition_started) {
82                 /*
83                  * Warn about parentless devices registered while a PM
84                  * transition is in progress; they may be left unhandled
85                  * down the road.
86                  */
87                 dev_WARN(dev, "Parentless device registered during a PM transition\n");
88         }
89
90         list_add_tail(&dev->power.entry, &dpm_list);
91         mutex_unlock(&dpm_list_mtx);
92 }
93
94 /**
95  *      device_pm_remove - remove a device from the list of active devices
96  *      @dev:   Device to be removed from the list
97  *
98  *      Note that this only deletes the device from dpm_list; it does not remove the device's PM-related sysfs attributes.
99  */
100 void device_pm_remove(struct device *dev)
101 {
102         pr_debug("PM: Removing info for %s:%s\n",
103                  dev->bus ? dev->bus->name : "No Bus",
104                  kobject_name(&dev->kobj));
105         mutex_lock(&dpm_list_mtx);
106         list_del_init(&dev->power.entry);
107         mutex_unlock(&dpm_list_mtx);
108 }
109
110 /**
111  *      device_pm_move_before - move device in dpm_list
112  *      @deva:  Device to move in dpm_list
113  *      @devb:  Device @deva should come before
114  */
115 void device_pm_move_before(struct device *deva, struct device *devb)
116 {
117         pr_debug("PM: Moving %s:%s before %s:%s\n",
118                  deva->bus ? deva->bus->name : "No Bus",
119                  kobject_name(&deva->kobj),
120                  devb->bus ? devb->bus->name : "No Bus",
121                  kobject_name(&devb->kobj));
122         /* Delete deva from dpm_list and reinsert before devb. */
123         list_move_tail(&deva->power.entry, &devb->power.entry);
124 }
125
126 /**
127  *      device_pm_move_after - move device in dpm_list
128  *      @deva:  Device to move in dpm_list
129  *      @devb:  Device @deva should come after
130  */
131 void device_pm_move_after(struct device *deva, struct device *devb)
132 {
133         pr_debug("PM: Moving %s:%s after %s:%s\n",
134                  deva->bus ? deva->bus->name : "No Bus",
135                  kobject_name(&deva->kobj),
136                  devb->bus ? devb->bus->name : "No Bus",
137                  kobject_name(&devb->kobj));
138         /* Delete deva from dpm_list and reinsert after devb. */
139         list_move(&deva->power.entry, &devb->power.entry);
140 }
141
142 /**
143  *      device_pm_move_last - move device to end of dpm_list
144  *      @dev:   Device to move in dpm_list
145  */
146 void device_pm_move_last(struct device *dev)
147 {
148         pr_debug("PM: Moving %s:%s to end of list\n",
149                  dev->bus ? dev->bus->name : "No Bus",
150                  kobject_name(&dev->kobj));
151         list_move_tail(&dev->power.entry, &dpm_list);
152 }
153
154 /**
155  *      pm_op - execute the PM operation appropriate for given PM event
156  *      @dev:   Device.
157  *      @ops:   PM operations to choose from.
158  *      @state: PM transition of the system being carried out.
159  */
160 static int pm_op(struct device *dev, struct dev_pm_ops *ops,
161                         pm_message_t state)
162 {
163         int error = 0;
164
165         switch (state.event) {
166 #ifdef CONFIG_SUSPEND
167         case PM_EVENT_SUSPEND:
168                 if (ops->suspend) {
169                         error = ops->suspend(dev);
170                         suspend_report_result(ops->suspend, error);
171                 }
172                 break;
173         case PM_EVENT_RESUME:
174                 if (ops->resume) {
175                         error = ops->resume(dev);
176                         suspend_report_result(ops->resume, error);
177                 }
178                 break;
179 #endif /* CONFIG_SUSPEND */
180 #ifdef CONFIG_HIBERNATION
181         case PM_EVENT_FREEZE:
182         case PM_EVENT_QUIESCE:
183                 if (ops->freeze) {
184                         error = ops->freeze(dev);
185                         suspend_report_result(ops->freeze, error);
186                 }
187                 break;
188         case PM_EVENT_HIBERNATE:
189                 if (ops->poweroff) {
190                         error = ops->poweroff(dev);
191                         suspend_report_result(ops->poweroff, error);
192                 }
193                 break;
194         case PM_EVENT_THAW:
195         case PM_EVENT_RECOVER:
196                 if (ops->thaw) {
197                         error = ops->thaw(dev);
198                         suspend_report_result(ops->thaw, error);
199                 }
200                 break;
201         case PM_EVENT_RESTORE:
202                 if (ops->restore) {
203                         error = ops->restore(dev);
204                         suspend_report_result(ops->restore, error);
205                 }
206                 break;
207 #endif /* CONFIG_HIBERNATION */
208         default:
209                 error = -EINVAL;
210         }
211         return error;
212 }
213
214 /**
215  *      pm_noirq_op - execute the PM operation appropriate for given PM event
216  *      @dev:   Device.
217  *      @ops:   PM operations to choose from.
218  *      @state: PM transition of the system being carried out.
219  *
220  *      The operation is executed with interrupts disabled by the only remaining
221  *      functional CPU in the system.
222  */
223 static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops,
224                         pm_message_t state)
225 {
226         int error = 0;
227
228         switch (state.event) {
229 #ifdef CONFIG_SUSPEND
230         case PM_EVENT_SUSPEND:
231                 if (ops->suspend_noirq) {
232                         error = ops->suspend_noirq(dev);
233                         suspend_report_result(ops->suspend_noirq, error);
234                 }
235                 break;
236         case PM_EVENT_RESUME:
237                 if (ops->resume_noirq) {
238                         error = ops->resume_noirq(dev);
239                         suspend_report_result(ops->resume_noirq, error);
240                 }
241                 break;
242 #endif /* CONFIG_SUSPEND */
243 #ifdef CONFIG_HIBERNATION
244         case PM_EVENT_FREEZE:
245         case PM_EVENT_QUIESCE:
246                 if (ops->freeze_noirq) {
247                         error = ops->freeze_noirq(dev);
248                         suspend_report_result(ops->freeze_noirq, error);
249                 }
250                 break;
251         case PM_EVENT_HIBERNATE:
252                 if (ops->poweroff_noirq) {
253                         error = ops->poweroff_noirq(dev);
254                         suspend_report_result(ops->poweroff_noirq, error);
255                 }
256                 break;
257         case PM_EVENT_THAW:
258         case PM_EVENT_RECOVER:
259                 if (ops->thaw_noirq) {
260                         error = ops->thaw_noirq(dev);
261                         suspend_report_result(ops->thaw_noirq, error);
262                 }
263                 break;
264         case PM_EVENT_RESTORE:
265                 if (ops->restore_noirq) {
266                         error = ops->restore_noirq(dev);
267                         suspend_report_result(ops->restore_noirq, error);
268                 }
269                 break;
270 #endif /* CONFIG_HIBERNATION */
271         default:
272                 error = -EINVAL;
273         }
274         return error;
275 }
276
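For orientation, here is a minimal sketch of the kind of dev_pm_ops table that pm_op() and pm_noirq_op() dispatch to. The foo_* names and the empty callback bodies are hypothetical, for illustration only; a subsystem would point its pm field at such a table.

#include <linux/pm.h>

/* Illustrative sketch only -- not part of this file. */
static int foo_suspend(struct device *dev)
{
	/* Quiesce the device; interrupts are still enabled here. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Undo foo_suspend(); bring the device back to full operation. */
	return 0;
}

static int foo_suspend_noirq(struct device *dev)
{
	/* Final power-down work, run with device interrupts disabled. */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* Early wake-up work, run before device interrupts are re-enabled. */
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};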
277 static char *pm_verb(int event)
278 {
279         switch (event) {
280         case PM_EVENT_SUSPEND:
281                 return "suspend";
282         case PM_EVENT_RESUME:
283                 return "resume";
284         case PM_EVENT_FREEZE:
285                 return "freeze";
286         case PM_EVENT_QUIESCE:
287                 return "quiesce";
288         case PM_EVENT_HIBERNATE:
289                 return "hibernate";
290         case PM_EVENT_THAW:
291                 return "thaw";
292         case PM_EVENT_RESTORE:
293                 return "restore";
294         case PM_EVENT_RECOVER:
295                 return "recover";
296         default:
297                 return "(unknown PM event)";
298         }
299 }
300
301 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
302 {
303         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
304                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
305                 ", may wakeup" : "");
306 }
307
308 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
309                         int error)
310 {
311         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
312                 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
313 }
314
315 /*------------------------- Resume routines -------------------------*/
316
317 /**
318  *      device_resume_noirq - Power on one device (early resume).
319  *      @dev:   Device.
320  *      @state: PM transition of the system being carried out.
321  *
322  *      Must be called with interrupts disabled.
323  */
324 static int device_resume_noirq(struct device *dev, pm_message_t state)
325 {
326         int error = 0;
327
328         TRACE_DEVICE(dev);
329         TRACE_RESUME(0);
330
331         if (!dev->bus)
332                 goto End;
333
334         if (dev->bus->pm) {
335                 pm_dev_dbg(dev, state, "EARLY ");
336                 error = pm_noirq_op(dev, dev->bus->pm, state);
337         } else if (dev->bus->resume_early) {
338                 pm_dev_dbg(dev, state, "legacy EARLY ");
339                 error = dev->bus->resume_early(dev);
340         }
341  End:
342         TRACE_RESUME(error);
343         return error;
344 }
345
346 /**
347  *      dpm_resume_noirq - Power on all regular (non-sysdev) devices.
348  *      @state: PM transition of the system being carried out.
349  *
350  *      Call the "noirq" resume handlers for all devices marked as
351  *      DPM_OFF_IRQ and enable device drivers to receive interrupts.
352  *
353  *      Acquires dpm_list_mtx internally.  Device interrupts remain disabled
354  *      while the handlers run; resume_device_irqs() re-enables them at the end.
355  */
356 void dpm_resume_noirq(pm_message_t state)
357 {
358         struct device *dev;
359
360         mutex_lock(&dpm_list_mtx);
361         list_for_each_entry(dev, &dpm_list, power.entry)
362                 if (dev->power.status > DPM_OFF) {
363                         int error;
364
365                         dev->power.status = DPM_OFF;
366                         error = device_resume_noirq(dev, state);
367                         if (error)
368                                 pm_dev_err(dev, state, " early", error);
369                 }
370         mutex_unlock(&dpm_list_mtx);
371         resume_device_irqs();
372 }
373 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
374
375 /**
376  *      device_resume - Restore state for one device.
377  *      @dev:   Device.
378  *      @state: PM transition of the system being carried out.
379  */
380 static int device_resume(struct device *dev, pm_message_t state)
381 {
382         int error = 0;
383
384         TRACE_DEVICE(dev);
385         TRACE_RESUME(0);
386
387         down(&dev->sem);
388
389         if (dev->bus) {
390                 if (dev->bus->pm) {
391                         pm_dev_dbg(dev, state, "");
392                         error = pm_op(dev, dev->bus->pm, state);
393                 } else if (dev->bus->resume) {
394                         pm_dev_dbg(dev, state, "legacy ");
395                         error = dev->bus->resume(dev);
396                 }
397                 if (error)
398                         goto End;
399         }
400
401         if (dev->type) {
402                 if (dev->type->pm) {
403                         pm_dev_dbg(dev, state, "type ");
404                         error = pm_op(dev, dev->type->pm, state);
405                 } else if (dev->type->resume) {
406                         pm_dev_dbg(dev, state, "legacy type ");
407                         error = dev->type->resume(dev);
408                 }
409                 if (error)
410                         goto End;
411         }
412
413         if (dev->class) {
414                 if (dev->class->pm) {
415                         pm_dev_dbg(dev, state, "class ");
416                         error = pm_op(dev, dev->class->pm, state);
417                 } else if (dev->class->resume) {
418                         pm_dev_dbg(dev, state, "legacy class ");
419                         error = dev->class->resume(dev);
420                 }
421         }
422  End:
423         up(&dev->sem);
424
425         TRACE_RESUME(error);
426         return error;
427 }
428
429 /**
430  *      dpm_resume - Resume every device.
431  *      @state: PM transition of the system being carried out.
432  *
433  *      Execute the appropriate "resume" callback for all devices whose status
434  *      indicates that they are inactive.
435  */
436 static void dpm_resume(pm_message_t state)
437 {
438         struct list_head list;
439
440         INIT_LIST_HEAD(&list);
441         mutex_lock(&dpm_list_mtx);
442         transition_started = false;
443         while (!list_empty(&dpm_list)) {
444                 struct device *dev = to_device(dpm_list.next);
445
446                 get_device(dev);
447                 if (dev->power.status >= DPM_OFF) {
448                         int error;
449
450                         dev->power.status = DPM_RESUMING;
451                         mutex_unlock(&dpm_list_mtx);
452
453                         error = device_resume(dev, state);
454
455                         mutex_lock(&dpm_list_mtx);
456                         if (error)
457                                 pm_dev_err(dev, state, "", error);
458                 } else if (dev->power.status == DPM_SUSPENDING) {
459                         /* Allow new children of the device to be registered */
460                         dev->power.status = DPM_RESUMING;
461                 }
462                 if (!list_empty(&dev->power.entry))
463                         list_move_tail(&dev->power.entry, &list);
464                 put_device(dev);
465         }
466         list_splice(&list, &dpm_list);
467         mutex_unlock(&dpm_list_mtx);
468 }
469
470 /**
471  *      device_complete - Complete a PM transition for given device
472  *      @dev:   Device.
473  *      @state: PM transition of the system being carried out.
474  */
475 static void device_complete(struct device *dev, pm_message_t state)
476 {
477         down(&dev->sem);
478
479         if (dev->class && dev->class->pm && dev->class->pm->complete) {
480                 pm_dev_dbg(dev, state, "completing class ");
481                 dev->class->pm->complete(dev);
482         }
483
484         if (dev->type && dev->type->pm && dev->type->pm->complete) {
485                 pm_dev_dbg(dev, state, "completing type ");
486                 dev->type->pm->complete(dev);
487         }
488
489         if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
490                 pm_dev_dbg(dev, state, "completing ");
491                 dev->bus->pm->complete(dev);
492         }
493
494         up(&dev->sem);
495 }
496
497 /**
498  *      dpm_complete - Complete a PM transition for all devices.
499  *      @state: PM transition of the system being carried out.
500  *
501  *      Execute the ->complete() callbacks for all devices that are not marked
502  *      as DPM_ON.
503  */
504 static void dpm_complete(pm_message_t state)
505 {
506         struct list_head list;
507
508         INIT_LIST_HEAD(&list);
509         mutex_lock(&dpm_list_mtx);
510         while (!list_empty(&dpm_list)) {
511                 struct device *dev = to_device(dpm_list.prev);
512
513                 get_device(dev);
514                 if (dev->power.status > DPM_ON) {
515                         dev->power.status = DPM_ON;
516                         mutex_unlock(&dpm_list_mtx);
517
518                         device_complete(dev, state);
519
520                         mutex_lock(&dpm_list_mtx);
521                 }
522                 if (!list_empty(&dev->power.entry))
523                         list_move(&dev->power.entry, &list);
524                 put_device(dev);
525         }
526         list_splice(&list, &dpm_list);
527         mutex_unlock(&dpm_list_mtx);
528 }
529
530 /**
531  *      dpm_resume_end - Restore state of each device in system.
532  *      @state: PM transition of the system being carried out.
533  *
534  *      Resume all the devices, unlock them all, and allow new
535  *      devices to be registered once again.
536  */
537 void dpm_resume_end(pm_message_t state)
538 {
539         might_sleep();
540         dpm_resume(state);
541         dpm_complete(state);
542 }
543 EXPORT_SYMBOL_GPL(dpm_resume_end);
544
545
546 /*------------------------- Suspend routines -------------------------*/
547
548 /**
549  *      resume_event - return a PM message representing the resume event
550  *                     corresponding to given sleep state.
551  *      @sleep_state: PM message representing a sleep state.
552  */
553 static pm_message_t resume_event(pm_message_t sleep_state)
554 {
555         switch (sleep_state.event) {
556         case PM_EVENT_SUSPEND:
557                 return PMSG_RESUME;
558         case PM_EVENT_FREEZE:
559         case PM_EVENT_QUIESCE:
560                 return PMSG_RECOVER;
561         case PM_EVENT_HIBERNATE:
562                 return PMSG_RESTORE;
563         }
564         return PMSG_ON;
565 }
566
567 /**
568  *      device_suspend_noirq - Shut down one device (late suspend).
569  *      @dev:   Device.
570  *      @state: PM transition of the system being carried out.
571  *
572  *      This is called with interrupts off and only a single CPU running.
573  */
574 static int device_suspend_noirq(struct device *dev, pm_message_t state)
575 {
576         int error = 0;
577
578         if (!dev->bus)
579                 return 0;
580
581         if (dev->bus->pm) {
582                 pm_dev_dbg(dev, state, "LATE ");
583                 error = pm_noirq_op(dev, dev->bus->pm, state);
584         } else if (dev->bus->suspend_late) {
585                 pm_dev_dbg(dev, state, "legacy LATE ");
586                 error = dev->bus->suspend_late(dev, state);
587                 suspend_report_result(dev->bus->suspend_late, error);
588         }
589         return error;
590 }
591
592 /**
593  *      dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
594  *      @state: PM transition of the system being carried out.
595  *
596  *      Prevent device drivers from receiving interrupts and call the "noirq"
597  *      suspend handlers.
598  *
599  *      Acquires dpm_list_mtx internally.
600  */
601 int dpm_suspend_noirq(pm_message_t state)
602 {
603         struct device *dev;
604         int error = 0;
605
606         suspend_device_irqs();
607         mutex_lock(&dpm_list_mtx);
608         list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
609                 error = device_suspend_noirq(dev, state);
610                 if (error) {
611                         pm_dev_err(dev, state, " late", error);
612                         break;
613                 }
614                 dev->power.status = DPM_OFF_IRQ;
615         }
616         mutex_unlock(&dpm_list_mtx);
617         if (error)
618                 dpm_resume_noirq(resume_event(state));
619         return error;
620 }
621 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
622
623 /**
624  *      device_suspend - Save state of one device.
625  *      @dev:   Device.
626  *      @state: PM transition of the system being carried out.
627  */
628 static int device_suspend(struct device *dev, pm_message_t state)
629 {
630         int error = 0;
631
632         down(&dev->sem);
633
634         if (dev->class) {
635                 if (dev->class->pm) {
636                         pm_dev_dbg(dev, state, "class ");
637                         error = pm_op(dev, dev->class->pm, state);
638                 } else if (dev->class->suspend) {
639                         pm_dev_dbg(dev, state, "legacy class ");
640                         error = dev->class->suspend(dev, state);
641                         suspend_report_result(dev->class->suspend, error);
642                 }
643                 if (error)
644                         goto End;
645         }
646
647         if (dev->type) {
648                 if (dev->type->pm) {
649                         pm_dev_dbg(dev, state, "type ");
650                         error = pm_op(dev, dev->type->pm, state);
651                 } else if (dev->type->suspend) {
652                         pm_dev_dbg(dev, state, "legacy type ");
653                         error = dev->type->suspend(dev, state);
654                         suspend_report_result(dev->type->suspend, error);
655                 }
656                 if (error)
657                         goto End;
658         }
659
660         if (dev->bus) {
661                 if (dev->bus->pm) {
662                         pm_dev_dbg(dev, state, "");
663                         error = pm_op(dev, dev->bus->pm, state);
664                 } else if (dev->bus->suspend) {
665                         pm_dev_dbg(dev, state, "legacy ");
666                         error = dev->bus->suspend(dev, state);
667                         suspend_report_result(dev->bus->suspend, error);
668                 }
669         }
670  End:
671         up(&dev->sem);
672
673         return error;
674 }
675
676 /**
677  *      dpm_suspend - Suspend every device.
678  *      @state: PM transition of the system being carried out.
679  *
680  *      Execute the appropriate "suspend" callbacks for all devices.
681  */
682 static int dpm_suspend(pm_message_t state)
683 {
684         struct list_head list;
685         int error = 0;
686
687         INIT_LIST_HEAD(&list);
688         mutex_lock(&dpm_list_mtx);
689         while (!list_empty(&dpm_list)) {
690                 struct device *dev = to_device(dpm_list.prev);
691
692                 get_device(dev);
693                 mutex_unlock(&dpm_list_mtx);
694
695                 error = device_suspend(dev, state);
696
697                 mutex_lock(&dpm_list_mtx);
698                 if (error) {
699                         pm_dev_err(dev, state, "", error);
700                         put_device(dev);
701                         break;
702                 }
703                 dev->power.status = DPM_OFF;
704                 if (!list_empty(&dev->power.entry))
705                         list_move(&dev->power.entry, &list);
706                 put_device(dev);
707         }
708         list_splice(&list, dpm_list.prev);
709         mutex_unlock(&dpm_list_mtx);
710         return error;
711 }
712
713 /**
714  *      device_prepare - Execute the ->prepare() callback(s) for given device.
715  *      @dev:   Device.
716  *      @state: PM transition of the system being carried out.
717  */
718 static int device_prepare(struct device *dev, pm_message_t state)
719 {
720         int error = 0;
721
722         down(&dev->sem);
723
724         if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
725                 pm_dev_dbg(dev, state, "preparing ");
726                 error = dev->bus->pm->prepare(dev);
727                 suspend_report_result(dev->bus->pm->prepare, error);
728                 if (error)
729                         goto End;
730         }
731
732         if (dev->type && dev->type->pm && dev->type->pm->prepare) {
733                 pm_dev_dbg(dev, state, "preparing type ");
734                 error = dev->type->pm->prepare(dev);
735                 suspend_report_result(dev->type->pm->prepare, error);
736                 if (error)
737                         goto End;
738         }
739
740         if (dev->class && dev->class->pm && dev->class->pm->prepare) {
741                 pm_dev_dbg(dev, state, "preparing class ");
742                 error = dev->class->pm->prepare(dev);
743                 suspend_report_result(dev->class->pm->prepare, error);
744         }
745  End:
746         up(&dev->sem);
747
748         return error;
749 }
750
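As a companion to device_prepare() above and device_complete() earlier in this file, a hedged sketch of a ->prepare()/->complete() pair as a hypothetical subsystem might supply them; the bar_* names are illustrative, not from this file.

#include <linux/pm.h>

/* Illustrative sketch only -- not part of this file. */
static int bar_prepare(struct device *dev)
{
	/*
	 * Runs before the suspend callbacks; a nonzero return other than
	 * -EAGAIN makes dpm_prepare() below fail the transition.
	 */
	return 0;
}

static void bar_complete(struct device *dev)
{
	/* Runs after resume; undoes whatever bar_prepare() set up. */
}

static struct dev_pm_ops bar_pm_ops = {
	.prepare	= bar_prepare,
	.complete	= bar_complete,
};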
751 /**
752  *      dpm_prepare - Prepare all devices for a PM transition.
753  *      @state: PM transition of the system being carried out.
754  *
755  *      Execute the ->prepare() callback for all devices.
756  */
757 static int dpm_prepare(pm_message_t state)
758 {
759         struct list_head list;
760         int error = 0;
761
762         INIT_LIST_HEAD(&list);
763         mutex_lock(&dpm_list_mtx);
764         transition_started = true;
765         while (!list_empty(&dpm_list)) {
766                 struct device *dev = to_device(dpm_list.next);
767
768                 get_device(dev);
769                 dev->power.status = DPM_PREPARING;
770                 mutex_unlock(&dpm_list_mtx);
771
772                 error = device_prepare(dev, state);
773
774                 mutex_lock(&dpm_list_mtx);
775                 if (error) {
776                         dev->power.status = DPM_ON;
777                         if (error == -EAGAIN) {
778                                 put_device(dev);
779                                 continue;
780                         }
781                         printk(KERN_ERR "PM: Failed to prepare device %s "
782                                 "for power transition: error %d\n",
783                                 kobject_name(&dev->kobj), error);
784                         put_device(dev);
785                         break;
786                 }
787                 dev->power.status = DPM_SUSPENDING;
788                 if (!list_empty(&dev->power.entry))
789                         list_move_tail(&dev->power.entry, &list);
790                 put_device(dev);
791         }
792         list_splice(&list, &dpm_list);
793         mutex_unlock(&dpm_list_mtx);
794         return error;
795 }
796
797 /**
798  *      dpm_suspend_start - Save state and stop all devices in system.
799  *      @state: PM transition of the system being carried out.
800  *
801  *      Prepare and suspend all devices.
802  */
803 int dpm_suspend_start(pm_message_t state)
804 {
805         int error;
806
807         might_sleep();
808         error = dpm_prepare(state);
809         if (!error)
810                 error = dpm_suspend(state);
811         return error;
812 }
813 EXPORT_SYMBOL_GPL(dpm_suspend_start);
814
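The dpm_* entry points exported above are meant for the system sleep core rather than for individual drivers. Below is a simplified, hedged sketch of the order in which a caller (for example the platform suspend code) might invoke them; the function name and the omitted platform hooks are hypothetical.

#include <linux/pm.h>

/* Illustrative sketch only -- rough call order for one suspend/resume cycle. */
static int example_suspend_cycle(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);	/* prepare + suspend */
	if (error)
		goto resume;

	error = dpm_suspend_noirq(PMSG_SUSPEND);	/* late, IRQs disabled */
	if (!error) {
		/* ... platform enters the sleep state and later wakes up ... */
		dpm_resume_noirq(PMSG_RESUME);		/* early resume */
	}
	/* On failure, dpm_suspend_noirq() has already undone the noirq phase. */
resume:
	dpm_resume_end(PMSG_RESUME);			/* resume + complete */
	return error;
}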
815 void __suspend_report_result(const char *function, void *fn, int ret)
816 {
817         if (ret)
818                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
819 }
820 EXPORT_SYMBOL_GPL(__suspend_report_result);