// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires  = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

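/*
 * For example (illustrative numbers only): with power.autosuspend_delay set
 * to 2000 ms and power.last_busy last captured at T ns, the routine above
 * returns T + 2000 * NSEC_PER_MSEC = T + 2e9 ns, provided that instant is
 * still in the future; otherwise it returns 0 and the device may be
 * suspended right away.
 */
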
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by a block device or network
 * device driver to solve the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  The
 *     situation was first pointed out by Alan Stern.  Network devices
 *     are involved in iSCSI-style situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all of its
		 * children don't set the flag, because an ancestor's
		 * flag may have been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

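/*
 * Minimal usage sketch (not from this file; the foo_* names are
 * hypothetical): a block device driver would typically set the flag right
 * after registering the disk and clear it again before unregistering:
 *
 *	static int foo_blk_probe(struct device *dev)
 *	{
 *		int ret = foo_add_disk(dev);	// calls device_add() internally
 *
 *		if (ret)
 *			return ret;
 *		pm_runtime_set_memalloc_noio(dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_blk_remove(struct device *dev)
 *	{
 *		pm_runtime_set_memalloc_noio(dev, false);
 *		foo_del_disk(dev);		// calls device_del() internally
 *	}
 */
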
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
		 atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 * @check_idle: Whether or not to check if the supplier device is idle.
 *
 * Drop all runtime PM references associated with @link to its supplier device
 * and if @check_idle is set, check if that device is idle (and so it can be
 * suspended).
 */
void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);

	if (check_idle)
		pm_request_idle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_runtime_release_supplier(link, try_to_suspend);
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be thought of as part of an iSCSI block
		 * device, so a network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification for the device's parent (but only if the suspend succeeded
 * and neither ignore_children of parent->power nor irq_safe of dev->power
 * is set).  If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if
 * the RPM_AUTO flag is set and the next autosuspend-delay expiration time
 * is in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

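/*
 * Minimal usage sketch (not from this file; the foo_* names are
 * hypothetical): ask for a suspend attempt one second from now instead of
 * suspending synchronously in an I/O completion path:
 *
 *	static void foo_io_done(struct foo_dev *foo)
 *	{
 *		pm_schedule_suspend(foo->dev, 1000);	// delay in ms
 *	}
 */
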
static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

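/*
 * For reference, the static inline wrappers in include/linux/pm_runtime.h
 * funnel into this entry point: pm_runtime_idle(dev) is
 * __pm_runtime_idle(dev, 0), pm_request_idle(dev) is
 * __pm_runtime_idle(dev, RPM_ASYNC), pm_runtime_put(dev) is
 * __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC) and
 * pm_runtime_put_sync(dev) is __pm_runtime_idle(dev, RPM_GET_PUT).
 */
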
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

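/*
 * For reference, the pm_runtime.h wrappers built on this entry point include
 * pm_runtime_suspend(dev) as __pm_runtime_suspend(dev, 0),
 * pm_runtime_autosuspend(dev) as __pm_runtime_suspend(dev, RPM_AUTO),
 * pm_runtime_put_sync_suspend(dev) as __pm_runtime_suspend(dev, RPM_GET_PUT)
 * and pm_runtime_put_autosuspend(dev) as
 * __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO).
 */
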
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
		       dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

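/*
 * For reference, the resume-side wrappers in include/linux/pm_runtime.h:
 * pm_runtime_resume(dev) is __pm_runtime_resume(dev, 0),
 * pm_request_resume(dev) is __pm_runtime_resume(dev, RPM_ASYNC),
 * pm_runtime_get(dev) is __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 * and pm_runtime_get_sync(dev) is __pm_runtime_resume(dev, RPM_GET_PUT).
 */
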
/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage_rcuidle(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);

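/*
 * For reference, pm_runtime_get_if_in_use(dev) in include/linux/pm_runtime.h
 * is pm_runtime_get_if_active(dev, false).  A typical use is taking an I/O
 * fast path only when the device is already powered up (a sketch; the foo_*
 * names are hypothetical):
 *
 *	if (pm_runtime_get_if_in_use(foo->dev) > 0) {
 *		foo_touch_hardware(foo);
 *		pm_runtime_put(foo->dev);
 *	}
 */
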
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return values:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 * @dev: Device to handle.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);

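/*
 * Minimal probe-time usage sketch (not from this file; foo_probe() is
 * hypothetical) - the devres action above undoes both the enable and the
 * autosuspend setup automatically on driver detach:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
 *		pm_runtime_use_autosuspend(&pdev->dev);
 *		return devm_pm_runtime_enable(&pdev->dev);
 *	}
 */
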
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage_rcuidle(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

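/*
 * Together, the two functions above implement the autosuspend idiom described
 * in Documentation/power/runtime_pm.rst.  A sketch (the foo_* names are
 * hypothetical): the driver opts in once, then marks activity and drops its
 * reference asynchronously after each I/O burst:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// 2 s of idleness
 *	pm_runtime_use_autosuspend(dev);
 *	...
 *	pm_runtime_get_sync(dev);
 *	foo_do_io(dev);
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
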
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = 0;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
			refcount_inc(&link->rpm_active);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			bool put;

			link->supplier_preactivated = false;

			spin_lock_irq(&dev->power.lock);

			put = pm_runtime_status_suspended(dev) &&
			      refcount_dec_not_one(&link->rpm_active);

			spin_unlock_irq(&dev->power.lock);

			if (put)
				pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * runtime PM references to it taken on behalf of the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link, true);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so that we can safely check the device's runtime PM
 * status, and if it is active, invoke its ->runtime_suspend callback to
 * suspend it and change its runtime PM status field to RPM_SUSPENDED.  Also,
 * if the device's usage and children counters don't indicate that the device
 * was in use before the system-wide transition under way, decrement its
 * parent's children counter (if there is a parent).  Keep runtime PM disabled
 * to preserve the state unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low power state.  It should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume.  Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);

 out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
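
/*
 * For reference, a driver whose runtime PM callbacks fully describe its low
 * power state can reuse this pair for system sleep - a common idiom from
 * Documentation/power/runtime_pm.rst, shown here as a sketch (the foo_*
 * names are hypothetical):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */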