/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */
9 #include <linux/init.h>
10 #include <linux/kernel.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_qos.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/suspend.h>
19 #include <linux/export.h>
21 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
23 type (*__routine)(struct device *__d); \
24 type __ret = (type)0; \
26 __routine = genpd->dev_ops.callback; \
28 __ret = __routine(dev); \
30 __routine = dev_gpd_data(dev)->ops.callback; \
32 __ret = __routine(dev); \
37 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
39 ktime_t __start = ktime_get(); \
40 type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
41 s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
42 struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
43 if (!__retval && __elapsed > __td->field) { \
44 __td->field = __elapsed; \
45 dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
47 genpd->max_off_time_changed = true; \
48 __td->constraint_changed = true; \
53 static LIST_HEAD(gpd_list);
54 static DEFINE_MUTEX(gpd_list_lock);
58 struct generic_pm_domain *dev_to_genpd(struct device *dev)
60 if (IS_ERR_OR_NULL(dev->pm_domain))
61 return ERR_PTR(-EINVAL);
63 return pd_to_genpd(dev->pm_domain);
66 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
68 return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
69 stop_latency_ns, "stop");
72 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
74 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
75 start_latency_ns, "start");
78 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
80 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
81 save_state_latency_ns, "state save");
84 static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
86 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
87 restore_state_latency_ns,
91 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
95 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
96 ret = !!atomic_dec_and_test(&genpd->sd_count);
101 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
103 atomic_inc(&genpd->sd_count);
104 smp_mb__after_atomic_inc();
107 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
111 mutex_lock(&genpd->lock);
113 * Wait for the domain to transition into either the active,
114 * or the power off state.
117 prepare_to_wait(&genpd->status_wait_queue, &wait,
118 TASK_UNINTERRUPTIBLE);
119 if (genpd->status == GPD_STATE_ACTIVE
120 || genpd->status == GPD_STATE_POWER_OFF)
122 mutex_unlock(&genpd->lock);
126 mutex_lock(&genpd->lock);
128 finish_wait(&genpd->status_wait_queue, &wait);
131 static void genpd_release_lock(struct generic_pm_domain *genpd)
133 mutex_unlock(&genpd->lock);
136 static void genpd_set_active(struct generic_pm_domain *genpd)
138 if (genpd->resume_count == 0)
139 genpd->status = GPD_STATE_ACTIVE;
143 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
144 * @genpd: PM domain to power up.
146 * Restore power to @genpd and all of its masters so that it is possible to
147 * resume a device belonging to it.
149 int __pm_genpd_poweron(struct generic_pm_domain *genpd)
150 __releases(&genpd->lock) __acquires(&genpd->lock)
152 struct gpd_link *link;
156 /* If the domain's master is being waited for, we have to wait too. */
158 prepare_to_wait(&genpd->status_wait_queue, &wait,
159 TASK_UNINTERRUPTIBLE);
160 if (genpd->status != GPD_STATE_WAIT_MASTER)
162 mutex_unlock(&genpd->lock);
166 mutex_lock(&genpd->lock);
168 finish_wait(&genpd->status_wait_queue, &wait);
170 if (genpd->status == GPD_STATE_ACTIVE
171 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
174 if (genpd->status != GPD_STATE_POWER_OFF) {
175 genpd_set_active(genpd);
180 * The list is guaranteed not to change while the loop below is being
181 * executed, unless one of the masters' .power_on() callbacks fiddles
184 list_for_each_entry(link, &genpd->slave_links, slave_node) {
185 genpd_sd_counter_inc(link->master);
186 genpd->status = GPD_STATE_WAIT_MASTER;
188 mutex_unlock(&genpd->lock);
190 ret = pm_genpd_poweron(link->master);
192 mutex_lock(&genpd->lock);
195 * The "wait for parent" status is guaranteed not to change
196 * while the master is powering on.
198 genpd->status = GPD_STATE_POWER_OFF;
199 wake_up_all(&genpd->status_wait_queue);
201 genpd_sd_counter_dec(link->master);
206 if (genpd->power_on) {
207 ktime_t time_start = ktime_get();
210 ret = genpd->power_on(genpd);
214 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
215 if (elapsed_ns > genpd->power_on_latency_ns) {
216 genpd->power_on_latency_ns = elapsed_ns;
217 genpd->max_off_time_changed = true;
219 pr_warning("%s: Power-on latency exceeded, "
220 "new value %lld ns\n", genpd->name,
225 genpd_set_active(genpd);
230 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
231 genpd_sd_counter_dec(link->master);
237 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
238 * @genpd: PM domain to power up.
240 int pm_genpd_poweron(struct generic_pm_domain *genpd)
244 mutex_lock(&genpd->lock);
245 ret = __pm_genpd_poweron(genpd);
246 mutex_unlock(&genpd->lock);
250 #endif /* CONFIG_PM */
252 #ifdef CONFIG_PM_RUNTIME
254 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
255 unsigned long val, void *ptr)
257 struct generic_pm_domain_data *gpd_data;
260 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
262 mutex_lock(&gpd_data->lock);
263 dev = gpd_data->base.dev;
265 mutex_unlock(&gpd_data->lock);
268 mutex_unlock(&gpd_data->lock);
271 struct generic_pm_domain *genpd;
272 struct pm_domain_data *pdd;
274 spin_lock_irq(&dev->power.lock);
276 pdd = dev->power.subsys_data ?
277 dev->power.subsys_data->domain_data : NULL;
279 to_gpd_data(pdd)->td.constraint_changed = true;
280 genpd = dev_to_genpd(dev);
282 genpd = ERR_PTR(-ENODATA);
285 spin_unlock_irq(&dev->power.lock);
287 if (!IS_ERR(genpd)) {
288 mutex_lock(&genpd->lock);
289 genpd->max_off_time_changed = true;
290 mutex_unlock(&genpd->lock);
294 if (!dev || dev->power.ignore_children)
302 * __pm_genpd_save_device - Save the pre-suspend state of a device.
303 * @pdd: Domain data of the device to save the state of.
304 * @genpd: PM domain the device belongs to.
306 static int __pm_genpd_save_device(struct pm_domain_data *pdd,
307 struct generic_pm_domain *genpd)
308 __releases(&genpd->lock) __acquires(&genpd->lock)
310 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
311 struct device *dev = pdd->dev;
314 if (gpd_data->need_restore)
317 mutex_unlock(&genpd->lock);
319 genpd_start_dev(genpd, dev);
320 ret = genpd_save_dev(genpd, dev);
321 genpd_stop_dev(genpd, dev);
323 mutex_lock(&genpd->lock);
326 gpd_data->need_restore = true;
332 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
333 * @pdd: Domain data of the device to restore the state of.
334 * @genpd: PM domain the device belongs to.
336 static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
337 struct generic_pm_domain *genpd)
338 __releases(&genpd->lock) __acquires(&genpd->lock)
340 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
341 struct device *dev = pdd->dev;
343 if (!gpd_data->need_restore)
346 mutex_unlock(&genpd->lock);
348 genpd_start_dev(genpd, dev);
349 genpd_restore_dev(genpd, dev);
350 genpd_stop_dev(genpd, dev);
352 mutex_lock(&genpd->lock);
354 gpd_data->need_restore = false;
358 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
359 * @genpd: PM domain to check.
361 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
362 * a "power off" operation, which means that a "power on" has occured in the
363 * meantime, or if its resume_count field is different from zero, which means
364 * that one of its devices has been resumed in the meantime.
366 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
368 return genpd->status == GPD_STATE_WAIT_MASTER
369 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
373 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
374 * @genpd: PM domait to power off.
376 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
379 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
381 if (!work_pending(&genpd->power_off_work))
382 queue_work(pm_wq, &genpd->power_off_work);
386 * pm_genpd_poweroff - Remove power from a given PM domain.
387 * @genpd: PM domain to power down.
389 * If all of the @genpd's devices have been suspended and all of its subdomains
390 * have been powered down, run the runtime suspend callbacks provided by all of
391 * the @genpd's devices' drivers and remove power from @genpd.
393 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
394 __releases(&genpd->lock) __acquires(&genpd->lock)
396 struct pm_domain_data *pdd;
397 struct gpd_link *link;
398 unsigned int not_suspended;
403 * Do not try to power off the domain in the following situations:
404 * (1) The domain is already in the "power off" state.
405 * (2) The domain is waiting for its master to power up.
406 * (3) One of the domain's devices is being resumed right now.
407 * (4) System suspend is in progress.
409 if (genpd->status == GPD_STATE_POWER_OFF
410 || genpd->status == GPD_STATE_WAIT_MASTER
411 || genpd->resume_count > 0 || genpd->prepared_count > 0)
414 if (atomic_read(&genpd->sd_count) > 0)
418 list_for_each_entry(pdd, &genpd->dev_list, list_node)
419 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
420 || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
423 if (not_suspended > genpd->in_progress)
426 if (genpd->poweroff_task) {
428 * Another instance of pm_genpd_poweroff() is executing
429 * callbacks, so tell it to start over and return.
431 genpd->status = GPD_STATE_REPEAT;
435 if (genpd->gov && genpd->gov->power_down_ok) {
436 if (!genpd->gov->power_down_ok(&genpd->domain))
440 genpd->status = GPD_STATE_BUSY;
441 genpd->poweroff_task = current;
443 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
444 ret = atomic_read(&genpd->sd_count) == 0 ?
445 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
447 if (genpd_abort_poweroff(genpd))
451 genpd_set_active(genpd);
455 if (genpd->status == GPD_STATE_REPEAT) {
456 genpd->poweroff_task = NULL;
461 if (genpd->power_off) {
465 if (atomic_read(&genpd->sd_count) > 0) {
470 time_start = ktime_get();
473 * If sd_count > 0 at this point, one of the subdomains hasn't
474 * managed to call pm_genpd_poweron() for the master yet after
475 * incrementing it. In that case pm_genpd_poweron() will wait
476 * for us to drop the lock, so we can call .power_off() and let
477 * the pm_genpd_poweron() restore power for us (this shouldn't
478 * happen very often).
480 ret = genpd->power_off(genpd);
482 genpd_set_active(genpd);
486 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
487 if (elapsed_ns > genpd->power_off_latency_ns) {
488 genpd->power_off_latency_ns = elapsed_ns;
489 genpd->max_off_time_changed = true;
491 pr_warning("%s: Power-off latency exceeded, "
492 "new value %lld ns\n", genpd->name,
497 genpd->status = GPD_STATE_POWER_OFF;
499 list_for_each_entry(link, &genpd->slave_links, slave_node) {
500 genpd_sd_counter_dec(link->master);
501 genpd_queue_power_off_work(link->master);
505 genpd->poweroff_task = NULL;
506 wake_up_all(&genpd->status_wait_queue);
511 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
512 * @work: Work structure used for scheduling the execution of this function.
514 static void genpd_power_off_work_fn(struct work_struct *work)
516 struct generic_pm_domain *genpd;
518 genpd = container_of(work, struct generic_pm_domain, power_off_work);
520 genpd_acquire_lock(genpd);
521 pm_genpd_poweroff(genpd);
522 genpd_release_lock(genpd);
526 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
527 * @dev: Device to suspend.
529 * Carry out a runtime suspend of a device under the assumption that its
530 * pm_domain field points to the domain member of an object of type
531 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
533 static int pm_genpd_runtime_suspend(struct device *dev)
535 struct generic_pm_domain *genpd;
536 bool (*stop_ok)(struct device *__dev);
539 dev_dbg(dev, "%s()\n", __func__);
541 genpd = dev_to_genpd(dev);
545 might_sleep_if(!genpd->dev_irq_safe);
547 if (dev_gpd_data(dev)->always_on)
550 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
551 if (stop_ok && !stop_ok(dev))
554 ret = genpd_stop_dev(genpd, dev);
559 * If power.irq_safe is set, this routine will be run with interrupts
560 * off, so it can't use mutexes.
562 if (dev->power.irq_safe)
565 mutex_lock(&genpd->lock);
566 genpd->in_progress++;
567 pm_genpd_poweroff(genpd);
568 genpd->in_progress--;
569 mutex_unlock(&genpd->lock);
575 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
576 * @dev: Device to resume.
578 * Carry out a runtime resume of a device under the assumption that its
579 * pm_domain field points to the domain member of an object of type
580 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
582 static int pm_genpd_runtime_resume(struct device *dev)
584 struct generic_pm_domain *genpd;
588 dev_dbg(dev, "%s()\n", __func__);
590 genpd = dev_to_genpd(dev);
594 might_sleep_if(!genpd->dev_irq_safe);
596 /* If power.irq_safe, the PM domain is never powered off. */
597 if (dev->power.irq_safe)
600 mutex_lock(&genpd->lock);
601 ret = __pm_genpd_poweron(genpd);
603 mutex_unlock(&genpd->lock);
606 genpd->status = GPD_STATE_BUSY;
607 genpd->resume_count++;
609 prepare_to_wait(&genpd->status_wait_queue, &wait,
610 TASK_UNINTERRUPTIBLE);
612 * If current is the powering off task, we have been called
613 * reentrantly from one of the device callbacks, so we should
616 if (!genpd->poweroff_task || genpd->poweroff_task == current)
618 mutex_unlock(&genpd->lock);
622 mutex_lock(&genpd->lock);
624 finish_wait(&genpd->status_wait_queue, &wait);
625 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
626 genpd->resume_count--;
627 genpd_set_active(genpd);
628 wake_up_all(&genpd->status_wait_queue);
629 mutex_unlock(&genpd->lock);
632 genpd_start_dev(genpd, dev);
638 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
640 void pm_genpd_poweroff_unused(void)
642 struct generic_pm_domain *genpd;
644 mutex_lock(&gpd_list_lock);
646 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
647 genpd_queue_power_off_work(genpd);
649 mutex_unlock(&gpd_list_lock);
654 static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
655 unsigned long val, void *ptr)
660 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
662 #define pm_genpd_runtime_suspend NULL
663 #define pm_genpd_runtime_resume NULL
665 #endif /* CONFIG_PM_RUNTIME */
667 #ifdef CONFIG_PM_SLEEP
669 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
672 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
675 static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
677 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
680 static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
682 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
685 static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
687 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
690 static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
692 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
695 static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
697 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
700 static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
702 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
705 static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
707 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
710 static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
712 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
716 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
717 * @genpd: PM domain to power off, if possible.
719 * Check if the given PM domain can be powered off (during system suspend or
720 * hibernation) and do that if so. Also, in that case propagate to its masters.
722 * This function is only called in "noirq" stages of system power transitions,
723 * so it need not acquire locks (all of the "noirq" callbacks are executed
724 * sequentially, so it is guaranteed that it will never run twice in parallel).
726 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
728 struct gpd_link *link;
730 if (genpd->status == GPD_STATE_POWER_OFF)
733 if (genpd->suspended_count != genpd->device_count
734 || atomic_read(&genpd->sd_count) > 0)
737 if (genpd->power_off)
738 genpd->power_off(genpd);
740 genpd->status = GPD_STATE_POWER_OFF;
742 list_for_each_entry(link, &genpd->slave_links, slave_node) {
743 genpd_sd_counter_dec(link->master);
744 pm_genpd_sync_poweroff(link->master);
749 * resume_needed - Check whether to resume a device before system suspend.
750 * @dev: Device to check.
751 * @genpd: PM domain the device belongs to.
753 * There are two cases in which a device that can wake up the system from sleep
754 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
755 * to wake up the system and it has to remain active for this purpose while the
756 * system is in the sleep state and (2) if the device is not enabled to wake up
757 * the system from sleep states and it generally doesn't generate wakeup signals
758 * by itself (those signals are generated on its behalf by other parts of the
759 * system). In the latter case it may be necessary to reconfigure the device's
760 * wakeup settings during system suspend, because it may have been set up to
761 * signal remote wakeup from the system's working state as needed by runtime PM.
762 * Return 'true' in either of the above cases.
764 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
768 if (!device_can_wakeup(dev))
771 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
772 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
776 * pm_genpd_prepare - Start power transition of a device in a PM domain.
777 * @dev: Device to start the transition of.
779 * Start a power transition of a device (during a system-wide power transition)
780 * under the assumption that its pm_domain field points to the domain member of
781 * an object of type struct generic_pm_domain representing a PM domain
782 * consisting of I/O devices.
784 static int pm_genpd_prepare(struct device *dev)
786 struct generic_pm_domain *genpd;
789 dev_dbg(dev, "%s()\n", __func__);
791 genpd = dev_to_genpd(dev);
796 * If a wakeup request is pending for the device, it should be woken up
797 * at this point and a system wakeup event should be reported if it's
798 * set up to wake up the system from sleep states.
800 pm_runtime_get_noresume(dev);
801 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
802 pm_wakeup_event(dev, 0);
804 if (pm_wakeup_pending()) {
805 pm_runtime_put_sync(dev);
809 if (resume_needed(dev, genpd))
810 pm_runtime_resume(dev);
812 genpd_acquire_lock(genpd);
814 if (genpd->prepared_count++ == 0) {
815 genpd->suspended_count = 0;
816 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
819 genpd_release_lock(genpd);
821 if (genpd->suspend_power_off) {
822 pm_runtime_put_noidle(dev);
827 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
828 * so pm_genpd_poweron() will return immediately, but if the device
829 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
830 * to make it operational.
832 pm_runtime_resume(dev);
833 __pm_runtime_disable(dev, false);
835 ret = pm_generic_prepare(dev);
837 mutex_lock(&genpd->lock);
839 if (--genpd->prepared_count == 0)
840 genpd->suspend_power_off = false;
842 mutex_unlock(&genpd->lock);
843 pm_runtime_enable(dev);
846 pm_runtime_put_sync(dev);
851 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
852 * @dev: Device to suspend.
854 * Suspend a device under the assumption that its pm_domain field points to the
855 * domain member of an object of type struct generic_pm_domain representing
856 * a PM domain consisting of I/O devices.
858 static int pm_genpd_suspend(struct device *dev)
860 struct generic_pm_domain *genpd;
862 dev_dbg(dev, "%s()\n", __func__);
864 genpd = dev_to_genpd(dev);
868 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
872 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
873 * @dev: Device to suspend.
875 * Carry out a late suspend of a device under the assumption that its
876 * pm_domain field points to the domain member of an object of type
877 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
879 static int pm_genpd_suspend_late(struct device *dev)
881 struct generic_pm_domain *genpd;
883 dev_dbg(dev, "%s()\n", __func__);
885 genpd = dev_to_genpd(dev);
889 return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
893 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
894 * @dev: Device to suspend.
896 * Stop the device and remove power from the domain if all devices in it have
899 static int pm_genpd_suspend_noirq(struct device *dev)
901 struct generic_pm_domain *genpd;
903 dev_dbg(dev, "%s()\n", __func__);
905 genpd = dev_to_genpd(dev);
909 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
910 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
913 genpd_stop_dev(genpd, dev);
916 * Since all of the "noirq" callbacks are executed sequentially, it is
917 * guaranteed that this function will never run twice in parallel for
918 * the same PM domain, so it is not necessary to use locking here.
920 genpd->suspended_count++;
921 pm_genpd_sync_poweroff(genpd);
927 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
928 * @dev: Device to resume.
930 * Restore power to the device's PM domain, if necessary, and start the device.
932 static int pm_genpd_resume_noirq(struct device *dev)
934 struct generic_pm_domain *genpd;
936 dev_dbg(dev, "%s()\n", __func__);
938 genpd = dev_to_genpd(dev);
942 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
943 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
947 * Since all of the "noirq" callbacks are executed sequentially, it is
948 * guaranteed that this function will never run twice in parallel for
949 * the same PM domain, so it is not necessary to use locking here.
951 pm_genpd_poweron(genpd);
952 genpd->suspended_count--;
954 return genpd_start_dev(genpd, dev);
958 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
959 * @dev: Device to resume.
961 * Carry out an early resume of a device under the assumption that its
962 * pm_domain field points to the domain member of an object of type
963 * struct generic_pm_domain representing a power domain consisting of I/O
966 static int pm_genpd_resume_early(struct device *dev)
968 struct generic_pm_domain *genpd;
970 dev_dbg(dev, "%s()\n", __func__);
972 genpd = dev_to_genpd(dev);
976 return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
980 * pm_genpd_resume - Resume of device in an I/O PM domain.
981 * @dev: Device to resume.
983 * Resume a device under the assumption that its pm_domain field points to the
984 * domain member of an object of type struct generic_pm_domain representing
985 * a power domain consisting of I/O devices.
987 static int pm_genpd_resume(struct device *dev)
989 struct generic_pm_domain *genpd;
991 dev_dbg(dev, "%s()\n", __func__);
993 genpd = dev_to_genpd(dev);
997 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
1001 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
1002 * @dev: Device to freeze.
1004 * Freeze a device under the assumption that its pm_domain field points to the
1005 * domain member of an object of type struct generic_pm_domain representing
1006 * a power domain consisting of I/O devices.
1008 static int pm_genpd_freeze(struct device *dev)
1010 struct generic_pm_domain *genpd;
1012 dev_dbg(dev, "%s()\n", __func__);
1014 genpd = dev_to_genpd(dev);
1018 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
1022 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1023 * @dev: Device to freeze.
1025 * Carry out a late freeze of a device under the assumption that its
1026 * pm_domain field points to the domain member of an object of type
1027 * struct generic_pm_domain representing a power domain consisting of I/O
1030 static int pm_genpd_freeze_late(struct device *dev)
1032 struct generic_pm_domain *genpd;
1034 dev_dbg(dev, "%s()\n", __func__);
1036 genpd = dev_to_genpd(dev);
1040 return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1044 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1045 * @dev: Device to freeze.
1047 * Carry out a late freeze of a device under the assumption that its
1048 * pm_domain field points to the domain member of an object of type
1049 * struct generic_pm_domain representing a power domain consisting of I/O
1052 static int pm_genpd_freeze_noirq(struct device *dev)
1054 struct generic_pm_domain *genpd;
1056 dev_dbg(dev, "%s()\n", __func__);
1058 genpd = dev_to_genpd(dev);
1062 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1063 0 : genpd_stop_dev(genpd, dev);
1067 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1068 * @dev: Device to thaw.
1070 * Start the device, unless power has been removed from the domain already
1071 * before the system transition.
1073 static int pm_genpd_thaw_noirq(struct device *dev)
1075 struct generic_pm_domain *genpd;
1077 dev_dbg(dev, "%s()\n", __func__);
1079 genpd = dev_to_genpd(dev);
1083 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1084 0 : genpd_start_dev(genpd, dev);
1088 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1089 * @dev: Device to thaw.
1091 * Carry out an early thaw of a device under the assumption that its
1092 * pm_domain field points to the domain member of an object of type
1093 * struct generic_pm_domain representing a power domain consisting of I/O
1096 static int pm_genpd_thaw_early(struct device *dev)
1098 struct generic_pm_domain *genpd;
1100 dev_dbg(dev, "%s()\n", __func__);
1102 genpd = dev_to_genpd(dev);
1106 return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
1110 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1111 * @dev: Device to thaw.
1113 * Thaw a device under the assumption that its pm_domain field points to the
1114 * domain member of an object of type struct generic_pm_domain representing
1115 * a power domain consisting of I/O devices.
1117 static int pm_genpd_thaw(struct device *dev)
1119 struct generic_pm_domain *genpd;
1121 dev_dbg(dev, "%s()\n", __func__);
1123 genpd = dev_to_genpd(dev);
1127 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
1131 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1132 * @dev: Device to resume.
1134 * Make sure the domain will be in the same power state as before the
1135 * hibernation the system is resuming from and start the device if necessary.
1137 static int pm_genpd_restore_noirq(struct device *dev)
1139 struct generic_pm_domain *genpd;
1141 dev_dbg(dev, "%s()\n", __func__);
1143 genpd = dev_to_genpd(dev);
1148 * Since all of the "noirq" callbacks are executed sequentially, it is
1149 * guaranteed that this function will never run twice in parallel for
1150 * the same PM domain, so it is not necessary to use locking here.
1152 * At this point suspended_count == 0 means we are being run for the
1153 * first time for the given domain in the present cycle.
1155 if (genpd->suspended_count++ == 0) {
1157 * The boot kernel might put the domain into arbitrary state,
1158 * so make it appear as powered off to pm_genpd_poweron(), so
1159 * that it tries to power it on in case it was really off.
1161 genpd->status = GPD_STATE_POWER_OFF;
1162 if (genpd->suspend_power_off) {
1164 * If the domain was off before the hibernation, make
1165 * sure it will be off going forward.
1167 if (genpd->power_off)
1168 genpd->power_off(genpd);
1174 if (genpd->suspend_power_off)
1177 pm_genpd_poweron(genpd);
1179 return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
1183 * pm_genpd_complete - Complete power transition of a device in a power domain.
1184 * @dev: Device to complete the transition of.
1186 * Complete a power transition of a device (during a system-wide power
1187 * transition) under the assumption that its pm_domain field points to the
1188 * domain member of an object of type struct generic_pm_domain representing
1189 * a power domain consisting of I/O devices.
1191 static void pm_genpd_complete(struct device *dev)
1193 struct generic_pm_domain *genpd;
1196 dev_dbg(dev, "%s()\n", __func__);
1198 genpd = dev_to_genpd(dev);
1202 mutex_lock(&genpd->lock);
1204 run_complete = !genpd->suspend_power_off;
1205 if (--genpd->prepared_count == 0)
1206 genpd->suspend_power_off = false;
1208 mutex_unlock(&genpd->lock);
1211 pm_generic_complete(dev);
1212 pm_runtime_set_active(dev);
1213 pm_runtime_enable(dev);
1214 pm_runtime_idle(dev);
1220 #define pm_genpd_prepare NULL
1221 #define pm_genpd_suspend NULL
1222 #define pm_genpd_suspend_late NULL
1223 #define pm_genpd_suspend_noirq NULL
1224 #define pm_genpd_resume_early NULL
1225 #define pm_genpd_resume_noirq NULL
1226 #define pm_genpd_resume NULL
1227 #define pm_genpd_freeze NULL
1228 #define pm_genpd_freeze_late NULL
1229 #define pm_genpd_freeze_noirq NULL
1230 #define pm_genpd_thaw_early NULL
1231 #define pm_genpd_thaw_noirq NULL
1232 #define pm_genpd_thaw NULL
1233 #define pm_genpd_restore_noirq NULL
1234 #define pm_genpd_complete NULL
1236 #endif /* CONFIG_PM_SLEEP */
1239 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1240 * @genpd: PM domain to add the device to.
1241 * @dev: Device to be added.
1242 * @td: Set of PM QoS timing parameters to attach to the device.
1244 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1245 struct gpd_timing_data *td)
1247 struct generic_pm_domain_data *gpd_data;
1248 struct pm_domain_data *pdd;
1251 dev_dbg(dev, "%s()\n", __func__);
1253 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1256 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1260 mutex_init(&gpd_data->lock);
1261 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1262 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1264 genpd_acquire_lock(genpd);
1266 if (genpd->prepared_count > 0) {
1271 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1272 if (pdd->dev == dev) {
1277 genpd->device_count++;
1278 genpd->max_off_time_changed = true;
1280 dev_pm_get_subsys_data(dev);
1282 mutex_lock(&gpd_data->lock);
1283 spin_lock_irq(&dev->power.lock);
1284 dev->pm_domain = &genpd->domain;
1285 dev->power.subsys_data->domain_data = &gpd_data->base;
1286 gpd_data->base.dev = dev;
1287 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1288 gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
1292 gpd_data->td.constraint_changed = true;
1293 gpd_data->td.effective_constraint_ns = -1;
1294 spin_unlock_irq(&dev->power.lock);
1295 mutex_unlock(&gpd_data->lock);
1297 genpd_release_lock(genpd);
1302 genpd_release_lock(genpd);
1304 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1310 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1311 * @genpd_node: Device tree node pointer representing a PM domain to which the
1312 * the device is added to.
1313 * @dev: Device to be added.
1314 * @td: Set of PM QoS timing parameters to attach to the device.
1316 int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1317 struct gpd_timing_data *td)
1319 struct generic_pm_domain *genpd = NULL, *gpd;
1321 dev_dbg(dev, "%s()\n", __func__);
1323 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1326 mutex_lock(&gpd_list_lock);
1327 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1328 if (gpd->of_node == genpd_node) {
1333 mutex_unlock(&gpd_list_lock);
1338 return __pm_genpd_add_device(genpd, dev, td);
1342 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1343 * @genpd: PM domain to remove the device from.
1344 * @dev: Device to be removed.
1346 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1349 struct generic_pm_domain_data *gpd_data;
1350 struct pm_domain_data *pdd;
1353 dev_dbg(dev, "%s()\n", __func__);
1355 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1356 || IS_ERR_OR_NULL(dev->pm_domain)
1357 || pd_to_genpd(dev->pm_domain) != genpd)
1360 genpd_acquire_lock(genpd);
1362 if (genpd->prepared_count > 0) {
1367 genpd->device_count--;
1368 genpd->max_off_time_changed = true;
1370 spin_lock_irq(&dev->power.lock);
1371 dev->pm_domain = NULL;
1372 pdd = dev->power.subsys_data->domain_data;
1373 list_del_init(&pdd->list_node);
1374 dev->power.subsys_data->domain_data = NULL;
1375 spin_unlock_irq(&dev->power.lock);
1377 gpd_data = to_gpd_data(pdd);
1378 mutex_lock(&gpd_data->lock);
1380 mutex_unlock(&gpd_data->lock);
1382 genpd_release_lock(genpd);
1384 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1386 dev_pm_put_subsys_data(dev);
1390 genpd_release_lock(genpd);
1396 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1397 * @dev: Device to set/unset the flag for.
1398 * @val: The new value of the device's "always on" flag.
1400 void pm_genpd_dev_always_on(struct device *dev, bool val)
1402 struct pm_subsys_data *psd;
1403 unsigned long flags;
1405 spin_lock_irqsave(&dev->power.lock, flags);
1407 psd = dev_to_psd(dev);
1408 if (psd && psd->domain_data)
1409 to_gpd_data(psd->domain_data)->always_on = val;
1411 spin_unlock_irqrestore(&dev->power.lock, flags);
1413 EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1416 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1417 * @dev: Device to set/unset the flag for.
1418 * @val: The new value of the device's "need restore" flag.
1420 void pm_genpd_dev_need_restore(struct device *dev, bool val)
1422 struct pm_subsys_data *psd;
1423 unsigned long flags;
1425 spin_lock_irqsave(&dev->power.lock, flags);
1427 psd = dev_to_psd(dev);
1428 if (psd && psd->domain_data)
1429 to_gpd_data(psd->domain_data)->need_restore = val;
1431 spin_unlock_irqrestore(&dev->power.lock, flags);
1433 EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1436 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1437 * @genpd: Master PM domain to add the subdomain to.
1438 * @subdomain: Subdomain to be added.
1440 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1441 struct generic_pm_domain *subdomain)
1443 struct gpd_link *link;
1446 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1450 genpd_acquire_lock(genpd);
1451 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1453 if (subdomain->status != GPD_STATE_POWER_OFF
1454 && subdomain->status != GPD_STATE_ACTIVE) {
1455 mutex_unlock(&subdomain->lock);
1456 genpd_release_lock(genpd);
1460 if (genpd->status == GPD_STATE_POWER_OFF
1461 && subdomain->status != GPD_STATE_POWER_OFF) {
1466 list_for_each_entry(link, &genpd->master_links, master_node) {
1467 if (link->slave == subdomain && link->master == genpd) {
1473 link = kzalloc(sizeof(*link), GFP_KERNEL);
1478 link->master = genpd;
1479 list_add_tail(&link->master_node, &genpd->master_links);
1480 link->slave = subdomain;
1481 list_add_tail(&link->slave_node, &subdomain->slave_links);
1482 if (subdomain->status != GPD_STATE_POWER_OFF)
1483 genpd_sd_counter_inc(genpd);
1486 mutex_unlock(&subdomain->lock);
1487 genpd_release_lock(genpd);
1493 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1494 * @genpd: Master PM domain to remove the subdomain from.
1495 * @subdomain: Subdomain to be removed.
1497 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1498 struct generic_pm_domain *subdomain)
1500 struct gpd_link *link;
1503 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1507 genpd_acquire_lock(genpd);
1509 list_for_each_entry(link, &genpd->master_links, master_node) {
1510 if (link->slave != subdomain)
1513 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1515 if (subdomain->status != GPD_STATE_POWER_OFF
1516 && subdomain->status != GPD_STATE_ACTIVE) {
1517 mutex_unlock(&subdomain->lock);
1518 genpd_release_lock(genpd);
1522 list_del(&link->master_node);
1523 list_del(&link->slave_node);
1525 if (subdomain->status != GPD_STATE_POWER_OFF)
1526 genpd_sd_counter_dec(genpd);
1528 mutex_unlock(&subdomain->lock);
1534 genpd_release_lock(genpd);
1540 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1541 * @dev: Device to add the callbacks to.
1542 * @ops: Set of callbacks to add.
1543 * @td: Timing data to add to the device along with the callbacks (optional).
1545 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1546 struct gpd_timing_data *td)
1548 struct pm_domain_data *pdd;
1551 if (!(dev && dev->power.subsys_data && ops))
1554 pm_runtime_disable(dev);
1557 pdd = dev->power.subsys_data->domain_data;
1559 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1561 gpd_data->ops = *ops;
1569 pm_runtime_enable(dev);
1573 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1576 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1577 * @dev: Device to remove the callbacks from.
1578 * @clear_td: If set, clear the device's timing data too.
1580 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1582 struct pm_domain_data *pdd;
1585 if (!(dev && dev->power.subsys_data))
1588 pm_runtime_disable(dev);
1591 pdd = dev->power.subsys_data->domain_data;
1593 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1595 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1597 gpd_data->td = (struct gpd_timing_data){ 0 };
1603 pm_runtime_enable(dev);
1607 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1609 /* Default device callbacks for generic PM domains. */
1612 * pm_genpd_default_save_state - Default "save device state" for PM domians.
1613 * @dev: Device to handle.
1615 static int pm_genpd_default_save_state(struct device *dev)
1617 int (*cb)(struct device *__dev);
1618 struct device_driver *drv = dev->driver;
1620 cb = dev_gpd_data(dev)->ops.save_state;
1624 if (drv && drv->pm && drv->pm->runtime_suspend)
1625 return drv->pm->runtime_suspend(dev);
1631 * pm_genpd_default_restore_state - Default PM domians "restore device state".
1632 * @dev: Device to handle.
1634 static int pm_genpd_default_restore_state(struct device *dev)
1636 int (*cb)(struct device *__dev);
1637 struct device_driver *drv = dev->driver;
1639 cb = dev_gpd_data(dev)->ops.restore_state;
1643 if (drv && drv->pm && drv->pm->runtime_resume)
1644 return drv->pm->runtime_resume(dev);
1649 #ifdef CONFIG_PM_SLEEP
1652 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
1653 * @dev: Device to handle.
1655 static int pm_genpd_default_suspend(struct device *dev)
1657 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1659 return cb ? cb(dev) : pm_generic_suspend(dev);
1663 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
1664 * @dev: Device to handle.
1666 static int pm_genpd_default_suspend_late(struct device *dev)
1668 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1670 return cb ? cb(dev) : pm_generic_suspend_late(dev);
1674 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
1675 * @dev: Device to handle.
1677 static int pm_genpd_default_resume_early(struct device *dev)
1679 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1681 return cb ? cb(dev) : pm_generic_resume_early(dev);
1685 * pm_genpd_default_resume - Default "device resume" for PM domians.
1686 * @dev: Device to handle.
1688 static int pm_genpd_default_resume(struct device *dev)
1690 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1692 return cb ? cb(dev) : pm_generic_resume(dev);
1696 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
1697 * @dev: Device to handle.
1699 static int pm_genpd_default_freeze(struct device *dev)
1701 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1703 return cb ? cb(dev) : pm_generic_freeze(dev);
1707 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
1708 * @dev: Device to handle.
1710 static int pm_genpd_default_freeze_late(struct device *dev)
1712 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1714 return cb ? cb(dev) : pm_generic_freeze_late(dev);
1718 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
1719 * @dev: Device to handle.
1721 static int pm_genpd_default_thaw_early(struct device *dev)
1723 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1725 return cb ? cb(dev) : pm_generic_thaw_early(dev);
1729 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
1730 * @dev: Device to handle.
1732 static int pm_genpd_default_thaw(struct device *dev)
1734 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1736 return cb ? cb(dev) : pm_generic_thaw(dev);
1739 #else /* !CONFIG_PM_SLEEP */
1741 #define pm_genpd_default_suspend NULL
1742 #define pm_genpd_default_suspend_late NULL
1743 #define pm_genpd_default_resume_early NULL
1744 #define pm_genpd_default_resume NULL
1745 #define pm_genpd_default_freeze NULL
1746 #define pm_genpd_default_freeze_late NULL
1747 #define pm_genpd_default_thaw_early NULL
1748 #define pm_genpd_default_thaw NULL
1750 #endif /* !CONFIG_PM_SLEEP */
1753 * pm_genpd_init - Initialize a generic I/O PM domain object.
1754 * @genpd: PM domain object to initialize.
1755 * @gov: PM domain governor to associate with the domain (may be NULL).
1756 * @is_off: Initial value of the domain's power_is_off field.
1758 void pm_genpd_init(struct generic_pm_domain *genpd,
1759 struct dev_power_governor *gov, bool is_off)
1761 if (IS_ERR_OR_NULL(genpd))
1764 INIT_LIST_HEAD(&genpd->master_links);
1765 INIT_LIST_HEAD(&genpd->slave_links);
1766 INIT_LIST_HEAD(&genpd->dev_list);
1767 mutex_init(&genpd->lock);
1769 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1770 genpd->in_progress = 0;
1771 atomic_set(&genpd->sd_count, 0);
1772 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1773 init_waitqueue_head(&genpd->status_wait_queue);
1774 genpd->poweroff_task = NULL;
1775 genpd->resume_count = 0;
1776 genpd->device_count = 0;
1777 genpd->max_off_time_ns = -1;
1778 genpd->max_off_time_changed = true;
1779 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1780 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1781 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1782 genpd->domain.ops.prepare = pm_genpd_prepare;
1783 genpd->domain.ops.suspend = pm_genpd_suspend;
1784 genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1785 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1786 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1787 genpd->domain.ops.resume_early = pm_genpd_resume_early;
1788 genpd->domain.ops.resume = pm_genpd_resume;
1789 genpd->domain.ops.freeze = pm_genpd_freeze;
1790 genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1791 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1792 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1793 genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1794 genpd->domain.ops.thaw = pm_genpd_thaw;
1795 genpd->domain.ops.poweroff = pm_genpd_suspend;
1796 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1797 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1798 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1799 genpd->domain.ops.restore_early = pm_genpd_resume_early;
1800 genpd->domain.ops.restore = pm_genpd_resume;
1801 genpd->domain.ops.complete = pm_genpd_complete;
1802 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1803 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1804 genpd->dev_ops.suspend = pm_genpd_default_suspend;
1805 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
1806 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
1807 genpd->dev_ops.resume = pm_genpd_default_resume;
1808 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1809 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1810 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1811 genpd->dev_ops.thaw = pm_genpd_default_thaw;
1812 mutex_lock(&gpd_list_lock);
1813 list_add(&genpd->gpd_list_node, &gpd_list);
1814 mutex_unlock(&gpd_list_lock);