runtime pm patches added
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 14 May 2012 23:38:46 +0000 (16:38 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 14 May 2012 23:38:46 +0000 (16:38 -0700)
211 files changed:
patches.runtime_pm/0001-Revert-PM-Runtime-Automatically-retry-failed-autosus.patch [new file with mode: 0644]
patches.runtime_pm/0002-PM-Domains-Rename-struct-dev_power_domain-to-struct-.patch [new file with mode: 0644]
patches.runtime_pm/0003-PM-subsys_data-in-struct-dev_pm_info-need-not-depend.patch [new file with mode: 0644]
patches.runtime_pm/0004-PM-Domains-Support-for-generic-I-O-PM-domains-v8.patch [new file with mode: 0644]
patches.runtime_pm/0005-PM-Introduce-generic-noirq-callback-routines-for-sub.patch [new file with mode: 0644]
patches.runtime_pm/0006-PM-Domains-Move-code-from-under-ifdef-CONFIG_PM_RUNT.patch [new file with mode: 0644]
patches.runtime_pm/0007-PM-Domains-System-wide-transitions-support-for-gener.patch [new file with mode: 0644]
patches.runtime_pm/0008-PM-Domains-Wakeup-devices-support-for-system-sleep-t.patch [new file with mode: 0644]
patches.runtime_pm/0009-PM-Allow-the-clocks-management-code-to-be-used-durin.patch [new file with mode: 0644]
patches.runtime_pm/0010-PM-Rename-clock-management-functions.patch [new file with mode: 0644]
patches.runtime_pm/0011-PM-Runtime-Update-documentation-of-interactions-with.patch [new file with mode: 0644]
patches.runtime_pm/0012-PM-Runtime-Return-special-error-code-if-runtime-PM-i.patch [new file with mode: 0644]
patches.runtime_pm/0013-PM-Limit-race-conditions-between-runtime-PM-and-syst.patch [new file with mode: 0644]
patches.runtime_pm/0014-PM-Runtime-Improve-documentation-of-enable-disable-a.patch [new file with mode: 0644]
patches.runtime_pm/0015-PM-Runtime-Replace-run-time-with-runtime-in-document.patch [new file with mode: 0644]
patches.runtime_pm/0016-PM-Runtime-Prevent-runtime_resume-from-racing-with-p.patch [new file with mode: 0644]
patches.runtime_pm/0017-PM-Runtime-Consistent-utilization-of-deferred_resume.patch [new file with mode: 0644]
patches.runtime_pm/0018-PM-Domains-Export-pm_genpd_poweron-in-header.patch [new file with mode: 0644]
patches.runtime_pm/0019-doc-Konfig-Documentation-power-pm-apm-acpi-.txt.patch [new file with mode: 0644]
patches.runtime_pm/0020-PM-Domains-Set-device-state-to-active-during-system-.patch [new file with mode: 0644]
patches.runtime_pm/0021-PM-Domains-Make-failing-pm_genpd_prepare-clean-up-pr.patch [new file with mode: 0644]
patches.runtime_pm/0022-PM-Domains-Do-not-execute-device-callbacks-under-loc.patch [new file with mode: 0644]
patches.runtime_pm/0023-PM-Domains-Allow-callbacks-to-execute-all-runtime-PM.patch [new file with mode: 0644]
patches.runtime_pm/0024-PM-Domains-Do-not-restore-all-devices-on-power-off-e.patch [new file with mode: 0644]
patches.runtime_pm/0025-PM-Domains-Improve-handling-of-wakeup-devices-during.patch [new file with mode: 0644]
patches.runtime_pm/0026-PM-Domains-Queue-up-power-off-work-only-if-it-is-not.patch [new file with mode: 0644]
patches.runtime_pm/0027-PM-Runtime-Add-new-helper-function-pm_runtime_status.patch [new file with mode: 0644]
patches.runtime_pm/0028-PM-Domains-Introduce-function-to-power-off-all-unuse.patch [new file with mode: 0644]
patches.runtime_pm/0029-ARM-shmobile-Use-genpd_queue_power_off_work.patch [new file with mode: 0644]
patches.runtime_pm/0030-PM-Domains-Take-.power_off-error-code-into-account.patch [new file with mode: 0644]
patches.runtime_pm/0031-PM-OPP-Introduce-function-to-free-cpufreq-table.patch [new file with mode: 0644]
patches.runtime_pm/0032-PM-Suspend-Add-.suspend_again-callback-to-suspend_op.patch [new file with mode: 0644]
patches.runtime_pm/0033-PM-Suspend-Export-suspend_set_ops-suspend_valid_only.patch [new file with mode: 0644]
patches.runtime_pm/0034-PM-Add-RTC-to-PM-trace-time-stamps-to-avoid-confusio.patch [new file with mode: 0644]
patches.runtime_pm/0035-PM-Improve-error-code-of-pm_notifier_call_chain.patch [new file with mode: 0644]
patches.runtime_pm/0036-drivers-base-power-opp.c-fix-dev_opp-initial-value.patch [new file with mode: 0644]
patches.runtime_pm/0037-PM-Domains-Fix-pm_genpd_poweron.patch [new file with mode: 0644]
patches.runtime_pm/0038-PM-Runtime-Allow-_put_sync-from-interrupts-disabled-.patch [new file with mode: 0644]
patches.runtime_pm/0039-PM-Domains-Fix-build-for-CONFIG_PM_RUNTIME-unset.patch [new file with mode: 0644]
patches.runtime_pm/0040-PM-Runtime-Add-might_sleep-to-runtime-PM-functions.patch [new file with mode: 0644]
patches.runtime_pm/0041-PM-Runtime-Add-macro-to-test-for-runtime-PM-events.patch [new file with mode: 0644]
patches.runtime_pm/0042-PM-Use-spinlock-instead-of-mutex-in-clock-management.patch [new file with mode: 0644]
patches.runtime_pm/0043-PM-Runtime-Correct-documentation-of-pm_runtime_irq_s.patch [new file with mode: 0644]
patches.runtime_pm/0044-PM-Domains-Implement-subdomain-counters-as-atomic-fi.patch [new file with mode: 0644]
patches.runtime_pm/0045-PM-Domains-Do-not-take-parent-locks-to-modify-subdom.patch [new file with mode: 0644]
patches.runtime_pm/0046-PM-Domains-Make-pm_genpd_poweron-always-survive-pare.patch [new file with mode: 0644]
patches.runtime_pm/0047-PM-Domains-Add-wait-for-parent-status-for-generic-PM.patch [new file with mode: 0644]
patches.runtime_pm/0048-PM-Domains-Allow-generic-PM-domains-to-have-multiple.patch [new file with mode: 0644]
patches.runtime_pm/0049-PM-Domains-Rename-GPD_STATE_WAIT_PARENT-to-GPD_STATE.patch [new file with mode: 0644]
patches.runtime_pm/0050-PM-Domains-Rename-argument-of-pm_genpd_add_subdomain.patch [new file with mode: 0644]
patches.runtime_pm/0051-PM-Introduce-struct-pm_subsys_data.patch [new file with mode: 0644]
patches.runtime_pm/0052-PM-Reference-counting-of-power.subsys_data.patch [new file with mode: 0644]
patches.runtime_pm/0053-PM-Domains-Use-power.sybsys_data-to-reduce-overhead.patch [new file with mode: 0644]
patches.runtime_pm/0054-PM-QoS-Move-and-rename-the-implementation-files.patch [new file with mode: 0644]
patches.runtime_pm/0055-plist-Remove-the-need-to-supply-locks-to-plist-heads.patch [new file with mode: 0644]
patches.runtime_pm/0056-PM-QoS-Minor-clean-ups.patch [new file with mode: 0644]
patches.runtime_pm/0057-PM-QoS-Code-reorganization.patch [new file with mode: 0644]
patches.runtime_pm/0058-PM-QoS-Reorganize-data-structs.patch [new file with mode: 0644]
patches.runtime_pm/0059-PM-QoS-Generalize-and-export-constraints-management-.patch [new file with mode: 0644]
patches.runtime_pm/0060-PM-QoS-Implement-per-device-PM-QoS-constraints.patch [new file with mode: 0644]
patches.runtime_pm/0061-PM-QoS-Add-global-notification-mechanism-for-device-.patch [new file with mode: 0644]
patches.runtime_pm/0062-PM-Domains-Preliminary-support-for-devices-with-powe.patch [new file with mode: 0644]
patches.runtime_pm/0063-PM-Runtime-pm_runtime_idle-can-be-called-in-atomic-c.patch [new file with mode: 0644]
patches.runtime_pm/0064-cpu_pm-Add-cpu-power-management-notifiers.patch [new file with mode: 0644]
patches.runtime_pm/0065-PM-Clocks-Do-not-acquire-a-mutex-under-a-spinlock.patch [new file with mode: 0644]
patches.runtime_pm/0066-PM-Domains-Split-device-PM-domain-data-into-base-and.patch [new file with mode: 0644]
patches.runtime_pm/0067-doc-fix-broken-references.patch [new file with mode: 0644]
patches.runtime_pm/0068-PM-Runtime-Don-t-run-callbacks-under-lock-for-power..patch [new file with mode: 0644]
patches.runtime_pm/0069-PM-Runtime-Introduce-trace-points-for-tracing-rpm_-f.patch [new file with mode: 0644]
patches.runtime_pm/0070-PM-Tracing-build-rpm-traces.c-only-if-CONFIG_PM_RUNT.patch [new file with mode: 0644]
patches.runtime_pm/0071-PM-Runtime-Replace-dev_dbg-with-trace_rpm_.patch [new file with mode: 0644]
patches.runtime_pm/0072-PM-OPP-Add-OPP-availability-change-notifier.patch [new file with mode: 0644]
patches.runtime_pm/0073-PM-OPP-Fix-build-when-CONFIG_PM_OPP-is-not-set.patch [new file with mode: 0644]
patches.runtime_pm/0074-PM-QoS-Add-function-dev_pm_qos_read_value-v3.patch [new file with mode: 0644]
patches.runtime_pm/0075-PM-QoS-Update-Documentation-for-the-pm_qos-and-dev_p.patch [new file with mode: 0644]
patches.runtime_pm/0076-regulator-Fix-some-bitrot-in-the-machine-driver-docu.patch [new file with mode: 0644]
patches.runtime_pm/0077-regulator-Clarify-documentation-for-regulator-regula.patch [new file with mode: 0644]
patches.runtime_pm/0078-PM-Runtime-Update-document-about-callbacks.patch [new file with mode: 0644]
patches.runtime_pm/0079-PM-Runtime-Fix-kerneldoc-comment-for-rpm_suspend.patch [new file with mode: 0644]
patches.runtime_pm/0080-PM-Runtime-Handle-.runtime_suspend-failure-correctly.patch [new file with mode: 0644]
patches.runtime_pm/0081-PM-Suspend-Add-statistics-debugfs-file-for-suspend-t.patch [new file with mode: 0644]
patches.runtime_pm/0082-PM-Fix-build-issue-in-main.c-for-CONFIG_PM_SLEEP-uns.patch [new file with mode: 0644]
patches.runtime_pm/0083-PM-Hibernate-Include-storage-keys-in-hibernation-ima.patch [new file with mode: 0644]
patches.runtime_pm/0084-PM-VT-Cleanup-if-defined-uglyness-and-fix-compile-er.patch [new file with mode: 0644]
patches.runtime_pm/0085-PM-Update-the-policy-on-default-wakeup-settings.patch [new file with mode: 0644]
patches.runtime_pm/0086-PM-Hibernate-Freeze-kernel-threads-after-preallocati.patch [new file with mode: 0644]
patches.runtime_pm/0087-PM-Hibernate-Fix-typo-in-a-kerneldoc-comment.patch [new file with mode: 0644]
patches.runtime_pm/0088-PM-Hibernate-Add-resumewait-param-to-support-MMC-lik.patch [new file with mode: 0644]
patches.runtime_pm/0089-PM-Hibernate-Add-resumedelay-kernel-param-in-additio.patch [new file with mode: 0644]
patches.runtime_pm/0090-PM-Hibernate-Do-not-initialize-static-and-extern-var.patch [new file with mode: 0644]
patches.runtime_pm/0091-PM-Hibernate-Improve-performance-of-LZO-plain-hibern.patch [new file with mode: 0644]
patches.runtime_pm/0092-PM-Sleep-Mark-devices-involved-in-wakeup-signaling-d.patch [new file with mode: 0644]
patches.runtime_pm/0093-PM-Documentation-Update-docs-about-suspend-and-CPU-h.patch [new file with mode: 0644]
patches.runtime_pm/0094-PM-Clocks-Remove-redundant-NULL-checks-before-kfree.patch [new file with mode: 0644]
patches.runtime_pm/0095-kernel-fix-several-implicit-usasges-of-kmod.h.patch [new file with mode: 0644]
patches.runtime_pm/0096-kernel-Fix-files-explicitly-needing-EXPORT_SYMBOL-in.patch [new file with mode: 0644]
patches.runtime_pm/0097-drivers-base-Add-export.h-for-EXPORT_SYMBOL-THIS_MOD.patch [new file with mode: 0644]
patches.runtime_pm/0098-drivers-base-change-module.h-export.h-in-power-commo.patch [new file with mode: 0644]
patches.runtime_pm/0099-pm_runtime.h-explicitly-requires-notifier.h.patch [new file with mode: 0644]
patches.runtime_pm/0100-PM-Sleep-Update-freezer-documentation.patch [new file with mode: 0644]
patches.runtime_pm/0101-PM-Runtime-Fix-runtime-accounting-calculation-error.patch [new file with mode: 0644]
patches.runtime_pm/0102-PM-QoS-Remove-redundant-check.patch [new file with mode: 0644]
patches.runtime_pm/0103-PM-Runtime-Automatically-retry-failed-autosuspends.patch [new file with mode: 0644]
patches.runtime_pm/0104-PM-QoS-Set-cpu_dma_pm_qos-name.patch [new file with mode: 0644]
patches.runtime_pm/0105-PM-OPP-Use-ERR_CAST-instead-of-ERR_PTR-PTR_ERR.patch [new file with mode: 0644]
patches.runtime_pm/0106-PM-Clocks-Only-disable-enabled-clocks-in-pm_clk_susp.patch [new file with mode: 0644]
patches.runtime_pm/0107-PM-QoS-Properly-use-the-WARN-macro-in-dev_pm_qos_add.patch [new file with mode: 0644]
patches.runtime_pm/0108-PM-Sleep-Do-not-extend-wakeup-paths-to-devices-with-.patch [new file with mode: 0644]
patches.runtime_pm/0109-PM-Hibernate-Fix-the-early-termination-of-test-modes.patch [new file with mode: 0644]
patches.runtime_pm/0110-PM-Suspend-Fix-bug-in-suspend-statistics-update.patch [new file with mode: 0644]
patches.runtime_pm/0111-freezer-don-t-unnecessarily-set-PF_NOFREEZE-explicit.patch [new file with mode: 0644]
patches.runtime_pm/0112-freezer-fix-current-state-restoration-race-in-refrig.patch [new file with mode: 0644]
patches.runtime_pm/0113-freezer-unexport-refrigerator-and-update-try_to_free.patch [new file with mode: 0644]
patches.runtime_pm/0114-oom-thaw-threads-if-oom-killed-thread-is-frozen-befo.patch [new file with mode: 0644]
patches.runtime_pm/0115-freezer-implement-and-use-kthread_freezable_should_s.patch [new file with mode: 0644]
patches.runtime_pm/0116-freezer-rename-thaw_process-to-__thaw_task-and-simpl.patch [new file with mode: 0644]
patches.runtime_pm/0117-freezer-remove-racy-clear_freeze_flag-and-set-PF_NOF.patch [new file with mode: 0644]
patches.runtime_pm/0118-freezer-don-t-distinguish-nosig-tasks-on-thaw.patch [new file with mode: 0644]
patches.runtime_pm/0119-freezer-use-dedicated-lock-instead-of-task_lock-memo.patch [new file with mode: 0644]
patches.runtime_pm/0120-freezer-make-freezing-indicate-freeze-condition-in-e.patch [new file with mode: 0644]
patches.runtime_pm/0121-freezer-test-freezable-conditions-while-holding-free.patch [new file with mode: 0644]
patches.runtime_pm/0122-freezer-clean-up-freeze_processes-failure-path.patch [new file with mode: 0644]
patches.runtime_pm/0123-cgroup_freezer-prepare-for-removal-of-TIF_FREEZE.patch [new file with mode: 0644]
patches.runtime_pm/0124-freezer-make-freezing-test-freeze-conditions-in-effe.patch [new file with mode: 0644]
patches.runtime_pm/0125-Freezer-fix-more-fallout-from-the-thaw_process-renam.patch [new file with mode: 0644]
patches.runtime_pm/0126-freezer-remove-unused-sig_only-from-freeze_task.patch [new file with mode: 0644]
patches.runtime_pm/0127-PM-Hibernate-Do-not-leak-memory-in-error-test-code-p.patch [new file with mode: 0644]
patches.runtime_pm/0128-PM-Fix-indentation-and-remove-extraneous-whitespaces.patch [new file with mode: 0644]
patches.runtime_pm/0129-PM-Sleep-Remove-unnecessary-label-and-jumps-to-it-fo.patch [new file with mode: 0644]
patches.runtime_pm/0130-PM-Sleep-Simplify-device_suspend_noirq.patch [new file with mode: 0644]
patches.runtime_pm/0131-PM-Hibernate-Refactor-and-simplify-hibernation_snaps.patch [new file with mode: 0644]
patches.runtime_pm/0132-PM-Domains-Document-how-PM-domains-are-used-by-the-P.patch [new file with mode: 0644]
patches.runtime_pm/0133-PM-Sleep-Correct-inaccurate-information-in-devices.t.patch [new file with mode: 0644]
patches.runtime_pm/0134-PM-Runtime-Make-documentation-follow-the-new-behavio.patch [new file with mode: 0644]
patches.runtime_pm/0135-PM-Sleep-Update-documentation-related-to-system-wake.patch [new file with mode: 0644]
patches.runtime_pm/0136-PM-Update-comments-describing-device-power-managemen.patch [new file with mode: 0644]
patches.runtime_pm/0137-PM-Runtime-Use-device-PM-QoS-constraints-v2.patch [new file with mode: 0644]
patches.runtime_pm/0138-PM-Domains-Make-it-possible-to-use-per-device-domain.patch [new file with mode: 0644]
patches.runtime_pm/0139-PM-Domains-Introduce-save-restore-state-device-callb.patch [new file with mode: 0644]
patches.runtime_pm/0140-PM-Domains-Rework-system-suspend-callback-routines-v.patch [new file with mode: 0644]
patches.runtime_pm/0141-PM-Domains-Add-device-stop-governor-function-v4.patch [new file with mode: 0644]
patches.runtime_pm/0142-PM-Domains-Add-default-power-off-governor-function-v.patch [new file with mode: 0644]
patches.runtime_pm/0143-PM-Domains-Automatically-update-overoptimistic-laten.patch [new file with mode: 0644]
patches.runtime_pm/0144-PM-Domains-fix-compilation-failure-for-CONFIG_PM_GEN.patch [new file with mode: 0644]
patches.runtime_pm/0145-regulator-Fix-regulator_register-API-signature-in-Do.patch [new file with mode: 0644]
patches.runtime_pm/0146-PM-Hibernate-Enable-usermodehelpers-in-software_resu.patch [new file with mode: 0644]
patches.runtime_pm/0147-PM-Hibernate-Thaw-processes-in-SNAPSHOT_CREATE_IMAGE.patch [new file with mode: 0644]
patches.runtime_pm/0148-PM-Hibernate-Remove-deprecated-hibernation-test-mode.patch [new file with mode: 0644]
patches.runtime_pm/0149-PM-Sleep-Unify-diagnostic-messages-from-device-suspe.patch [new file with mode: 0644]
patches.runtime_pm/0150-PM-Hibernate-Replace-unintuitive-if-condition-in-ker.patch [new file with mode: 0644]
patches.runtime_pm/0151-PM-Domains-Make-it-possible-to-assign-names-to-gener.patch [new file with mode: 0644]
patches.runtime_pm/0152-PM-Domains-Fix-default-system-suspend-resume-operati.patch [new file with mode: 0644]
patches.runtime_pm/0153-PM-Sleep-Replace-mutex_-un-lock-pm_mutex-with-un-loc.patch [new file with mode: 0644]
patches.runtime_pm/0154-PM-Sleep-Recommend-un-lock_system_sleep-over-using-p.patch [new file with mode: 0644]
patches.runtime_pm/0155-PM-Domains-Provide-an-always-on-power-domain-governo.patch [new file with mode: 0644]
patches.runtime_pm/0156-PM-Hibernate-Remove-deprecated-hibernation-snapshot-.patch [new file with mode: 0644]
patches.runtime_pm/0157-PM-Sleep-Simplify-generic-system-suspend-callbacks.patch [new file with mode: 0644]
patches.runtime_pm/0158-PM-Sleep-Merge-internal-functions-in-generic_ops.c.patch [new file with mode: 0644]
patches.runtime_pm/0159-PM-Sleep-Make-pm_op-and-pm_noirq_op-return-callback-.patch [new file with mode: 0644]
patches.runtime_pm/0160-PM-Run-the-driver-callback-directly-if-the-subsystem.patch [new file with mode: 0644]
patches.runtime_pm/0161-PM-Drop-generic_subsys_pm_ops.patch [new file with mode: 0644]
patches.runtime_pm/0162-PM-QoS-Introduce-dev_pm_qos_add_ancestor_request.patch [new file with mode: 0644]
patches.runtime_pm/0163-power_supply-Add-initial-Charger-Manager-driver.patch [new file with mode: 0644]
patches.runtime_pm/0164-PM-Hibernate-Implement-compat_ioctl-for-dev-snapshot.patch [new file with mode: 0644]
patches.runtime_pm/0165-mm-more-intensive-memory-corruption-debugging.patch [new file with mode: 0644]
patches.runtime_pm/0166-PM-Hibernate-do-not-count-debug-pages-as-savable.patch [new file with mode: 0644]
patches.runtime_pm/0167-power_supply-Charger-Manager-Add-properties-for-powe.patch [new file with mode: 0644]
patches.runtime_pm/0168-PM-Domains-Fix-build-for-CONFIG_PM_SLEEP-unset.patch [new file with mode: 0644]
patches.runtime_pm/0169-PM-Domains-Skip-governor-functions-for-CONFIG_PM_RUN.patch [new file with mode: 0644]
patches.runtime_pm/0170-PM-Documentation-Fix-spelling-mistake-in-basic-pm-de.patch [new file with mode: 0644]
patches.runtime_pm/0171-PM-Documentation-Fix-minor-issue-in-freezing_of_task.patch [new file with mode: 0644]
patches.runtime_pm/0172-PM-Hibernate-Correct-additional-pages-number-calcula.patch [new file with mode: 0644]
patches.runtime_pm/0173-PM-Domains-Add-OF-support.patch [new file with mode: 0644]
patches.runtime_pm/0174-PM-Hibernate-Fix-s2disk-regression-related-to-freezi.patch [new file with mode: 0644]
patches.runtime_pm/0175-PM-Sleep-Introduce-late-suspend-and-early-resume-of-.patch [new file with mode: 0644]
patches.runtime_pm/0176-PM-Sleep-Introduce-generic-callbacks-for-new-device-.patch [new file with mode: 0644]
patches.runtime_pm/0177-PM-Domains-Run-late-early-device-suspend-callbacks-a.patch [new file with mode: 0644]
patches.runtime_pm/0178-PM-QoS-Simplify-PM-QoS-expansion-merge.patch [new file with mode: 0644]
patches.runtime_pm/0179-PM-Hibernate-Thaw-kernel-threads-in-SNAPSHOT_CREATE_.patch [new file with mode: 0644]
patches.runtime_pm/0180-PM-Freezer-Thaw-only-kernel-threads-if-freezing-of-k.patch [new file with mode: 0644]
patches.runtime_pm/0181-PM-QoS-CPU-C-state-breakage-with-PM-Qos-change.patch [new file with mode: 0644]
patches.runtime_pm/0182-PM-Suspend-Avoid-code-duplication-in-suspend-statist.patch [new file with mode: 0644]
patches.runtime_pm/0183-PM-Freezer-Docs-Document-the-beauty-of-freeze-thaw-s.patch [new file with mode: 0644]
patches.runtime_pm/0184-PM-Hibernate-Thaw-kernel-threads-in-hibernation_snap.patch [new file with mode: 0644]
patches.runtime_pm/0185-PM-Hibernate-Refactor-and-simplify-freezer_test_done.patch [new file with mode: 0644]
patches.runtime_pm/0186-PM-Domains-Provide-a-dummy-dev_gpd_data-when-generic.patch [new file with mode: 0644]
patches.runtime_pm/0187-PM-Make-sysrq-o-be-available-for-CONFIG_PM-unset.patch [new file with mode: 0644]
patches.runtime_pm/0188-PM-QoS-unconditionally-build-the-feature.patch [new file with mode: 0644]
patches.runtime_pm/0189-PM-Sleep-Initialize-wakeup-source-locks-in-wakeup_so.patch [new file with mode: 0644]
patches.runtime_pm/0190-PM-Sleep-Do-not-check-wakeup-too-often-in-try_to_fre.patch [new file with mode: 0644]
patches.runtime_pm/0191-PM-Sleep-Remove-unnecessary-label-from-suspend_freez.patch [new file with mode: 0644]
patches.runtime_pm/0192-PM-Sleep-Unify-kerneldoc-comments-in-kernel-power-su.patch [new file with mode: 0644]
patches.runtime_pm/0193-PM-Sleep-Make-enter_state-in-kernel-power-suspend.c-.patch [new file with mode: 0644]
patches.runtime_pm/0194-PM-Sleep-Drop-suspend_stats_update.patch [new file with mode: 0644]
patches.runtime_pm/0195-PM-Add-comment-describing-relationships-between-PM-c.patch [new file with mode: 0644]
patches.runtime_pm/0196-PM-Hibernate-print-physical-addresses-consistently-w.patch [new file with mode: 0644]
patches.runtime_pm/0197-PM-Sleep-Fix-possible-infinite-loop-during-wakeup-so.patch [new file with mode: 0644]
patches.runtime_pm/0198-PM-Sleep-Fix-race-conditions-related-to-wakeup-sourc.patch [new file with mode: 0644]
patches.runtime_pm/0199-PM-Sleep-Add-more-wakeup-source-initialization-routi.patch [new file with mode: 0644]
patches.runtime_pm/0200-PM-Freezer-Remove-references-to-TIF_FREEZE-in-commen.patch [new file with mode: 0644]
patches.runtime_pm/0201-PM-Domains-Fix-include-for-PM_GENERIC_DOMAINS-n-case.patch [new file with mode: 0644]
patches.runtime_pm/0202-PM-QoS-Make-it-possible-to-expose-PM-QoS-latency-con.patch [new file with mode: 0644]
patches.runtime_pm/0203-PM-Domains-Fix-handling-of-wakeup-devices-during-sys.patch [new file with mode: 0644]
patches.runtime_pm/0204-PM-Domains-Fix-hibernation-restore-of-devices-v2.patch [new file with mode: 0644]
patches.runtime_pm/0205-PM-Domains-Introduce-always-on-device-flag.patch [new file with mode: 0644]
patches.runtime_pm/0206-PM-Domains-Check-domain-status-during-hibernation-re.patch [new file with mode: 0644]
patches.runtime_pm/0207-PM-Runtime-don-t-forget-to-wake-up-waitqueue-on-fail.patch [new file with mode: 0644]
patches.runtime_pm/0208-PM-Hibernate-Disable-usermode-helpers-right-before-f.patch [new file with mode: 0644]
patches.runtime_pm/0209-PM-Sleep-Move-disabling-of-usermode-helpers-to-the-f.patch [new file with mode: 0644]
patches.runtime_pm/0210-PM-QoS-add-pm_qos_update_request_timeout-API.patch [new file with mode: 0644]
series

diff --git a/patches.runtime_pm/0001-Revert-PM-Runtime-Automatically-retry-failed-autosus.patch b/patches.runtime_pm/0001-Revert-PM-Runtime-Automatically-retry-failed-autosus.patch
new file mode 100644 (file)
index 0000000..e81ab23
--- /dev/null
@@ -0,0 +1,78 @@
+From 9875e01a633d878cb016cff8a349da2a090b4720 Mon Sep 17 00:00:00 2001
+From: Simon Horman <horms@verge.net.au>
+Date: Sat, 21 Apr 2012 02:09:00 +0900
+Subject: Revert "PM / Runtime: Automatically retry failed autosuspends"
+
+This reverts commit 8dc9c7911421d8e45901ffaf483b5dca99cbb055.
+
+The origin of this change, 886486b792e4f6f96d4fbe8ec5bf20811cab7d6a,
+will be included in a fuller backport of PM which follows.
+The reason for reverting this is to avoid conflicts when
+backporting other PM fixes to the same file.
+---
+ Documentation/power/runtime_pm.txt |   10 ----------
+ drivers/base/power/runtime.c       |   18 ++----------------
+ 2 files changed, 2 insertions(+), 26 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 6ade987..b24875b 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -708,16 +708,6 @@ will behave normally, not taking the autosuspend delay into account.
+ Similarly, if the power.use_autosuspend field isn't set then the autosuspend
+ helper functions will behave just like the non-autosuspend counterparts.
+-Under some circumstances a driver or subsystem may want to prevent a device
+-from autosuspending immediately, even though the usage counter is zero and the
+-autosuspend delay time has expired.  If the ->runtime_suspend() callback
+-returns -EAGAIN or -EBUSY, and if the next autosuspend delay expiration time is
+-in the future (as it normally would be if the callback invoked
+-pm_runtime_mark_last_busy()), the PM core will automatically reschedule the
+-autosuspend.  The ->runtime_suspend() callback can't do this rescheduling
+-itself because no suspend requests of any kind are accepted while the device is
+-suspending (i.e., while the callback is running).
+-
+ The implementation is well suited for asynchronous use in interrupt contexts.
+ However such use inevitably involves races, because the PM core can't
+ synchronize ->runtime_suspend() callbacks with the arrival of I/O requests.
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 1023392..0d4587b 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -278,9 +278,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+  * If a deferred resume was requested while the callback was running then carry
+  * it out; otherwise send an idle notification for the device (if the suspend
+  * failed) or for its parent (if the suspend succeeded).
+- * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+- * flag is set and the next autosuspend-delay expiration time is in the
+- * future, schedule another autosuspend attempt.
+  *
+  * This function must be called under dev->power.lock with interrupts disabled.
+  */
+@@ -392,21 +389,10 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       if (retval) {
+               __update_runtime_status(dev, RPM_ACTIVE);
+               dev->power.deferred_resume = 0;
+-              if (retval == -EAGAIN || retval == -EBUSY) {
++              if (retval == -EAGAIN || retval == -EBUSY)
+                       dev->power.runtime_error = 0;
+-
+-                      /*
+-                       * If the callback routine failed an autosuspend, and
+-                       * if the last_busy time has been updated so that there
+-                       * is a new autosuspend expiration time, automatically
+-                       * reschedule another autosuspend.
+-                       */
+-                      if ((rpmflags & RPM_AUTO) &&
+-                          pm_runtime_autosuspend_expiration(dev) != 0)
+-                              goto repeat;
+-              } else {
++              else
+                       pm_runtime_cancel_pending(dev);
+-              }
+       } else {
+  no_callback:
+               __update_runtime_status(dev, RPM_SUSPENDED);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0002-PM-Domains-Rename-struct-dev_power_domain-to-struct-.patch b/patches.runtime_pm/0002-PM-Domains-Rename-struct-dev_power_domain-to-struct-.patch
new file mode 100644 (file)
index 0000000..9dd7155
--- /dev/null
@@ -0,0 +1,397 @@
+From 2693f3ae83d1066e9b83faafa2a92c933aee9667 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 23 Jun 2011 01:52:55 +0200
+Subject: PM / Domains: Rename struct dev_power_domain to struct dev_pm_domain
+
+The naming convention used by commit 7538e3db6e015e890825fbd9f86599b
+(PM: Add support for device power domains), which introduced the
+struct dev_power_domain type for representing device power domains,
+evidently confuses some developers who tend to think that objects
+of this type must correspond to "power domains" as defined by
+hardware, which is not the case.  Namely, at the kernel level, a
+struct dev_power_domain object can represent arbitrary set of devices
+that are mutually dependent power management-wise and need not belong
+to one hardware power domain.  To avoid that confusion, rename struct
+dev_power_domain to struct dev_pm_domain and rename the related
+pointers in struct device and struct pm_clk_notifier_block from
+pwr_domain to pm_domain.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 564b905ab10d17fb42f86aa8b7b9b796276d1336)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt          |    8 ++++----
+ arch/arm/mach-omap1/pm_bus.c             |    8 ++++----
+ arch/arm/mach-shmobile/pm_runtime.c      |    8 ++++----
+ arch/arm/plat-omap/omap_device.c         |    4 ++--
+ arch/sh/kernel/cpu/shmobile/pm_runtime.c |    6 +++---
+ drivers/base/power/clock_ops.c           |   14 +++++++-------
+ drivers/base/power/main.c                |   30 +++++++++++++++---------------
+ drivers/base/power/runtime.c             |   12 ++++++------
+ include/linux/device.h                   |    4 ++--
+ include/linux/pm.h                       |    2 +-
+ include/linux/pm_runtime.h               |    2 +-
+ 11 files changed, 49 insertions(+), 49 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 64565aa..85c6f98 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -506,8 +506,8 @@ routines.  Nevertheless, different callback pointers are used in case there is a
+ situation where it actually matters.
+-Device Power Domains
+---------------------
++Device Power Management Domains
++-------------------------------
+ Sometimes devices share reference clocks or other power resources.  In those
+ cases it generally is not possible to put devices into low-power states
+ individually.  Instead, a set of devices sharing a power resource can be put
+@@ -516,8 +516,8 @@ power resource.  Of course, they also need to be put into the full-power state
+ together, by turning the shared power resource on.  A set of devices with this
+ property is often referred to as a power domain.
+-Support for power domains is provided through the pwr_domain field of struct
+-device.  This field is a pointer to an object of type struct dev_power_domain,
++Support for power domains is provided through the pm_domain field of struct
++device.  This field is a pointer to an object of type struct dev_pm_domain,
+ defined in include/linux/pm.h, providing a set of power management callbacks
+ analogous to the subsystem-level and device driver callbacks that are executed
+ for the given device during all power transitions, instead of the respective
+diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
+index 334fb88..212f331 100644
+--- a/arch/arm/mach-omap1/pm_bus.c
++++ b/arch/arm/mach-omap1/pm_bus.c
+@@ -49,20 +49,20 @@ static int omap1_pm_runtime_resume(struct device *dev)
+       return pm_generic_runtime_resume(dev);
+ }
+-static struct dev_power_domain default_power_domain = {
++static struct dev_pm_domain default_pm_domain = {
+       .ops = {
+               .runtime_suspend = omap1_pm_runtime_suspend,
+               .runtime_resume = omap1_pm_runtime_resume,
+               USE_PLATFORM_PM_SLEEP_OPS
+       },
+ };
+-#define OMAP1_PWR_DOMAIN (&default_power_domain)
++#define OMAP1_PM_DOMAIN (&default_pm_domain)
+ #else
+-#define OMAP1_PWR_DOMAIN NULL
++#define OMAP1_PM_DOMAIN NULL
+ #endif /* CONFIG_PM_RUNTIME */
+ static struct pm_clk_notifier_block platform_bus_notifier = {
+-      .pwr_domain = OMAP1_PWR_DOMAIN,
++      .pm_domain = OMAP1_PM_DOMAIN,
+       .con_ids = { "ick", "fck", NULL, },
+ };
+diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
+index 2d1b67a..99802d2 100644
+--- a/arch/arm/mach-shmobile/pm_runtime.c
++++ b/arch/arm/mach-shmobile/pm_runtime.c
+@@ -28,7 +28,7 @@ static int default_platform_runtime_idle(struct device *dev)
+       return pm_runtime_suspend(dev);
+ }
+-static struct dev_power_domain default_power_domain = {
++static struct dev_pm_domain default_pm_domain = {
+       .ops = {
+               .runtime_suspend = pm_runtime_clk_suspend,
+               .runtime_resume = pm_runtime_clk_resume,
+@@ -37,16 +37,16 @@ static struct dev_power_domain default_power_domain = {
+       },
+ };
+-#define DEFAULT_PWR_DOMAIN_PTR        (&default_power_domain)
++#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
+ #else
+-#define DEFAULT_PWR_DOMAIN_PTR        NULL
++#define DEFAULT_PM_DOMAIN_PTR NULL
+ #endif /* CONFIG_PM_RUNTIME */
+ static struct pm_clk_notifier_block platform_bus_notifier = {
+-      .pwr_domain = DEFAULT_PWR_DOMAIN_PTR,
++      .pm_domain = DEFAULT_PM_DOMAIN_PTR,
+       .con_ids = { NULL, },
+ };
+diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
+index 49fc0df..d21579b 100644
+--- a/arch/arm/plat-omap/omap_device.c
++++ b/arch/arm/plat-omap/omap_device.c
+@@ -564,7 +564,7 @@ static int _od_runtime_resume(struct device *dev)
+       return pm_generic_runtime_resume(dev);
+ }
+-static struct dev_power_domain omap_device_power_domain = {
++static struct dev_pm_domain omap_device_pm_domain = {
+       .ops = {
+               .runtime_suspend = _od_runtime_suspend,
+               .runtime_idle = _od_runtime_idle,
+@@ -586,7 +586,7 @@ int omap_device_register(struct omap_device *od)
+       pr_debug("omap_device: %s: registering\n", od->pdev.name);
+       od->pdev.dev.parent = &omap_device_parent;
+-      od->pdev.dev.pwr_domain = &omap_device_power_domain;
++      od->pdev.dev.pm_domain = &omap_device_pm_domain;
+       return platform_device_register(&od->pdev);
+ }
+diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+index 64c807c..bf280c8 100644
+--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
++++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+@@ -256,7 +256,7 @@ out:
+       return ret;
+ }
+-static struct dev_power_domain default_power_domain = {
++static struct dev_pm_domain default_pm_domain = {
+       .ops = {
+               .runtime_suspend = default_platform_runtime_suspend,
+               .runtime_resume = default_platform_runtime_resume,
+@@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
+               hwblk_disable(hwblk_info, hwblk);
+               /* make sure driver re-inits itself once */
+               __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+-              dev->pwr_domain = &default_power_domain;
++              dev->pm_domain = &default_pm_domain;
+               break;
+       /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
+       case BUS_NOTIFY_BOUND_DRIVER:
+@@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
+               __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+               break;
+       case BUS_NOTIFY_DEL_DEVICE:
+-              dev->pwr_domain = NULL;
++              dev->pm_domain = NULL;
+               break;
+       }
+       return 0;
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index ad367c4..c562481 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -278,11 +278,11 @@ int pm_runtime_clk_resume(struct device *dev)
+  *
+  * For this function to work, @nb must be a member of an object of type
+  * struct pm_clk_notifier_block containing all of the requisite data.
+- * Specifically, the pwr_domain member of that object is copied to the device's
+- * pwr_domain field and its con_ids member is used to populate the device's list
++ * Specifically, the pm_domain member of that object is copied to the device's
++ * pm_domain field and its con_ids member is used to populate the device's list
+  * of runtime PM clocks, depending on @action.
+  *
+- * If the device's pwr_domain field is already populated with a value different
++ * If the device's pm_domain field is already populated with a value different
+  * from the one stored in the struct pm_clk_notifier_block object, the function
+  * does nothing.
+  */
+@@ -300,14 +300,14 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+       switch (action) {
+       case BUS_NOTIFY_ADD_DEVICE:
+-              if (dev->pwr_domain)
++              if (dev->pm_domain)
+                       break;
+               error = pm_runtime_clk_init(dev);
+               if (error)
+                       break;
+-              dev->pwr_domain = clknb->pwr_domain;
++              dev->pm_domain = clknb->pm_domain;
+               if (clknb->con_ids[0]) {
+                       for (con_id = clknb->con_ids; *con_id; con_id++)
+                               pm_runtime_clk_add(dev, *con_id);
+@@ -317,10 +317,10 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+               break;
+       case BUS_NOTIFY_DEL_DEVICE:
+-              if (dev->pwr_domain != clknb->pwr_domain)
++              if (dev->pm_domain != clknb->pm_domain)
+                       break;
+-              dev->pwr_domain = NULL;
++              dev->pm_domain = NULL;
+               pm_runtime_clk_destroy(dev);
+               break;
+       }
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 06f09bf..85b591a 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -425,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
+       TRACE_DEVICE(dev);
+       TRACE_RESUME(0);
+-      if (dev->pwr_domain) {
++      if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "EARLY power domain ");
+-              error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
++              error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+               pm_dev_dbg(dev, state, "EARLY type ");
+               error = pm_noirq_op(dev, dev->type->pm, state);
+@@ -521,9 +521,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+       if (!dev->power.is_suspended)
+               goto Unlock;
+-      if (dev->pwr_domain) {
++      if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "power domain ");
+-              error = pm_op(dev, &dev->pwr_domain->ops, state);
++              error = pm_op(dev, &dev->pm_domain->ops, state);
+               goto End;
+       }
+@@ -641,10 +641,10 @@ static void device_complete(struct device *dev, pm_message_t state)
+ {
+       device_lock(dev);
+-      if (dev->pwr_domain) {
++      if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "completing power domain ");
+-              if (dev->pwr_domain->ops.complete)
+-                      dev->pwr_domain->ops.complete(dev);
++              if (dev->pm_domain->ops.complete)
++                      dev->pm_domain->ops.complete(dev);
+       } else if (dev->type && dev->type->pm) {
+               pm_dev_dbg(dev, state, "completing type ");
+               if (dev->type->pm->complete)
+@@ -744,9 +744,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
+ {
+       int error;
+-      if (dev->pwr_domain) {
++      if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "LATE power domain ");
+-              error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
++              error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+               if (error)
+                       return error;
+       } else if (dev->type && dev->type->pm) {
+@@ -853,9 +853,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+               goto Unlock;
+       }
+-      if (dev->pwr_domain) {
++      if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "power domain ");
+-              error = pm_op(dev, &dev->pwr_domain->ops, state);
++              error = pm_op(dev, &dev->pm_domain->ops, state);
+               goto End;
+       }
+@@ -982,11 +982,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
+       device_lock(dev);
+-      if (dev->pwr_domain) {
++      if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "preparing power domain ");
+-              if (dev->pwr_domain->ops.prepare)
+-                      error = dev->pwr_domain->ops.prepare(dev);
+-              suspend_report_result(dev->pwr_domain->ops.prepare, error);
++              if (dev->pm_domain->ops.prepare)
++                      error = dev->pm_domain->ops.prepare(dev);
++              suspend_report_result(dev->pm_domain->ops.prepare, error);
+               if (error)
+                       goto End;
+       } else if (dev->type && dev->type->pm) {
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 0d4587b..5f5c423 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -213,8 +213,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
+       dev->power.idle_notification = true;
+-      if (dev->pwr_domain)
+-              callback = dev->pwr_domain->ops.runtime_idle;
++      if (dev->pm_domain)
++              callback = dev->pm_domain->ops.runtime_idle;
+       else if (dev->type && dev->type->pm)
+               callback = dev->type->pm->runtime_idle;
+       else if (dev->class && dev->class->pm)
+@@ -374,8 +374,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       __update_runtime_status(dev, RPM_SUSPENDING);
+-      if (dev->pwr_domain)
+-              callback = dev->pwr_domain->ops.runtime_suspend;
++      if (dev->pm_domain)
++              callback = dev->pm_domain->ops.runtime_suspend;
+       else if (dev->type && dev->type->pm)
+               callback = dev->type->pm->runtime_suspend;
+       else if (dev->class && dev->class->pm)
+@@ -573,8 +573,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
+       __update_runtime_status(dev, RPM_RESUMING);
+-      if (dev->pwr_domain)
+-              callback = dev->pwr_domain->ops.runtime_resume;
++      if (dev->pm_domain)
++              callback = dev->pm_domain->ops.runtime_resume;
+       else if (dev->type && dev->type->pm)
+               callback = dev->type->pm->runtime_resume;
+       else if (dev->class && dev->class->pm)
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 680656e..ad8ecfd 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -585,7 +585,7 @@ struct device_dma_parameters {
+  *            minimizes board-specific #ifdefs in drivers.
+  * @power:    For device power management.
+  *            See Documentation/power/devices.txt for details.
+- * @pwr_domain:       Provide callbacks that are executed during system suspend,
++ * @pm_domain:        Provide callbacks that are executed during system suspend,
+  *            hibernation, system resume and during runtime PM transitions
+  *            along with subsystem-level and driver-level callbacks.
+  * @numa_node:        NUMA node this device is close to.
+@@ -636,7 +636,7 @@ struct device {
+       void            *platform_data; /* Platform specific data, device
+                                          core doesn't touch it */
+       struct dev_pm_info      power;
+-      struct dev_power_domain *pwr_domain;
++      struct dev_pm_domain    *pm_domain;
+ #ifdef CONFIG_NUMA
+       int             numa_node;      /* NUMA node this device is close to */
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 411e4f4..e396320 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -472,7 +472,7 @@ extern void update_pm_runtime_accounting(struct device *dev);
+  * hibernation, system resume and during runtime PM transitions along with
+  * subsystem-level and driver-level callbacks.
+  */
+-struct dev_power_domain {
++struct dev_pm_domain {
+       struct dev_pm_ops       ops;
+ };
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 878cf84..ef91904 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -247,7 +247,7 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
+ struct pm_clk_notifier_block {
+       struct notifier_block nb;
+-      struct dev_power_domain *pwr_domain;
++      struct dev_pm_domain *pm_domain;
+       char *con_ids[];
+ };
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0003-PM-subsys_data-in-struct-dev_pm_info-need-not-depend.patch b/patches.runtime_pm/0003-PM-subsys_data-in-struct-dev_pm_info-need-not-depend.patch
new file mode 100644 (file)
index 0000000..d80f148
--- /dev/null
@@ -0,0 +1,35 @@
+From 475efc5f01d3a5fa3641c6e11e5f1981ddfaa52c Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 23 Jun 2011 01:53:04 +0200
+Subject: PM: subsys_data in struct dev_pm_info need not depend on RM_RUNTIME
+
+The subsys_data field of struct dev_pm_info, introduced by commit
+1d2b71f61b6a10216274e27b717becf9ae101fc7 (PM / Runtime: Add subsystem
+data field to struct dev_pm_info), is going to be used even if
+CONFIG_PM_RUNTIME is not set, so move it from under the #ifdef.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit dc6e4e56e6ef473a696a1ab24f80b79b9aceb92d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index e396320..7e8f076 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -461,8 +461,8 @@ struct dev_pm_info {
+       unsigned long           active_jiffies;
+       unsigned long           suspended_jiffies;
+       unsigned long           accounting_timestamp;
+-      void                    *subsys_data;  /* Owned by the subsystem. */
+ #endif
++      void                    *subsys_data;  /* Owned by the subsystem. */
+ };
+ extern void update_pm_runtime_accounting(struct device *dev);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0004-PM-Domains-Support-for-generic-I-O-PM-domains-v8.patch b/patches.runtime_pm/0004-PM-Domains-Support-for-generic-I-O-PM-domains-v8.patch
new file mode 100644 (file)
index 0000000..aa72cbe
--- /dev/null
@@ -0,0 +1,656 @@
+From fbbeb16d1177d1a4de3242dc5e7c7bfd7ffeb10c Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:12:45 +0200
+Subject: PM / Domains: Support for generic I/O PM domains (v8)
+
+Introduce common headers, helper functions and callbacks allowing
+platforms to use simple generic power domains for runtime power
+management.
+
+Introduce struct generic_pm_domain to be used for representing
+power domains that each contain a number of devices and may be
+parent domains or subdomains with respect to other power domains.
+Among other things, this structure includes callbacks to be
+provided by platforms for performing specific tasks related to
+power management (i.e. ->stop_device() may disable a device's
+clocks, while ->start_device() may enable them, ->power_off() is
+supposed to remove power from the entire power domain
+and ->power_on() is supposed to restore it).
+
+Introduce functions that can be used as power domain runtime PM
+callbacks, pm_genpd_runtime_suspend() and pm_genpd_runtime_resume(),
+as well as helper functions for the initialization of a power
+domain represented by a struct generic_power_domain object,
+adding a device to or removing a device from it and adding or
+removing subdomains.
+
+Introduce configuration option CONFIG_PM_GENERIC_DOMAINS to be
+selected by the platforms that want to use the new code.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit f721889ff65afa6243c463832c74dee3bed418d5)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/Makefile |    1 +
+ drivers/base/power/domain.c |  494 +++++++++++++++++++++++++++++++++++++++++++
+ include/linux/pm_domain.h   |   78 +++++++
+ kernel/power/Kconfig        |    4 +
+ 4 files changed, 577 insertions(+)
+ create mode 100644 drivers/base/power/domain.c
+ create mode 100644 include/linux/pm_domain.h
+
+diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
+index 3647e11..2639ae7 100644
+--- a/drivers/base/power/Makefile
++++ b/drivers/base/power/Makefile
+@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
+ obj-$(CONFIG_PM_RUNTIME)      += runtime.o
+ obj-$(CONFIG_PM_TRACE_RTC)    += trace.o
+ obj-$(CONFIG_PM_OPP)  += opp.o
++obj-$(CONFIG_PM_GENERIC_DOMAINS)      +=  domain.o
+ obj-$(CONFIG_HAVE_CLK)        += clock_ops.o
+ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+\ No newline at end of file
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+new file mode 100644
+index 0000000..fd31be3
+--- /dev/null
++++ b/drivers/base/power/domain.c
+@@ -0,0 +1,494 @@
++/*
++ * drivers/base/power/domain.c - Common code related to device power domains.
++ *
++ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
++ *
++ * This file is released under the GPLv2.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/pm_runtime.h>
++#include <linux/pm_domain.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++
++#ifdef CONFIG_PM_RUNTIME
++
++static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
++{
++      if (!WARN_ON(genpd->sd_count == 0))
++                      genpd->sd_count--;
++}
++
++/**
++ * __pm_genpd_save_device - Save the pre-suspend state of a device.
++ * @dle: Device list entry of the device to save the state of.
++ * @genpd: PM domain the device belongs to.
++ */
++static int __pm_genpd_save_device(struct dev_list_entry *dle,
++                                struct generic_pm_domain *genpd)
++{
++      struct device *dev = dle->dev;
++      struct device_driver *drv = dev->driver;
++      int ret = 0;
++
++      if (dle->need_restore)
++              return 0;
++
++      if (drv && drv->pm && drv->pm->runtime_suspend) {
++              if (genpd->start_device)
++                      genpd->start_device(dev);
++
++              ret = drv->pm->runtime_suspend(dev);
++
++              if (genpd->stop_device)
++                      genpd->stop_device(dev);
++      }
++
++      if (!ret)
++              dle->need_restore = true;
++
++      return ret;
++}
++
++/**
++ * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
++ * @dle: Device list entry of the device to restore the state of.
++ * @genpd: PM domain the device belongs to.
++ */
++static void __pm_genpd_restore_device(struct dev_list_entry *dle,
++                                    struct generic_pm_domain *genpd)
++{
++      struct device *dev = dle->dev;
++      struct device_driver *drv = dev->driver;
++
++      if (!dle->need_restore)
++              return;
++
++      if (drv && drv->pm && drv->pm->runtime_resume) {
++              if (genpd->start_device)
++                      genpd->start_device(dev);
++
++              drv->pm->runtime_resume(dev);
++
++              if (genpd->stop_device)
++                      genpd->stop_device(dev);
++      }
++
++      dle->need_restore = false;
++}
++
++/**
++ * pm_genpd_poweroff - Remove power from a given PM domain.
++ * @genpd: PM domain to power down.
++ *
++ * If all of the @genpd's devices have been suspended and all of its subdomains
++ * have been powered down, run the runtime suspend callbacks provided by all of
++ * the @genpd's devices' drivers and remove power from @genpd.
++ */
++static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
++{
++      struct generic_pm_domain *parent;
++      struct dev_list_entry *dle;
++      unsigned int not_suspended;
++      int ret;
++
++      if (genpd->power_is_off)
++              return 0;
++
++      if (genpd->sd_count > 0)
++              return -EBUSY;
++
++      not_suspended = 0;
++      list_for_each_entry(dle, &genpd->dev_list, node)
++              if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
++                      not_suspended++;
++
++      if (not_suspended > genpd->in_progress)
++              return -EBUSY;
++
++      if (genpd->gov && genpd->gov->power_down_ok) {
++              if (!genpd->gov->power_down_ok(&genpd->domain))
++                      return -EAGAIN;
++      }
++
++      list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
++              ret = __pm_genpd_save_device(dle, genpd);
++              if (ret)
++                      goto err_dev;
++      }
++
++      if (genpd->power_off)
++              genpd->power_off(genpd);
++
++      genpd->power_is_off = true;
++
++      parent = genpd->parent;
++      if (parent) {
++              genpd_sd_counter_dec(parent);
++              if (parent->sd_count == 0)
++                      queue_work(pm_wq, &parent->power_off_work);
++      }
++
++      return 0;
++
++ err_dev:
++      list_for_each_entry_continue(dle, &genpd->dev_list, node)
++              __pm_genpd_restore_device(dle, genpd);
++
++      return ret;
++}
++
++/**
++ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
++ * @work: Work structure used for scheduling the execution of this function.
++ */
++static void genpd_power_off_work_fn(struct work_struct *work)
++{
++      struct generic_pm_domain *genpd;
++
++      genpd = container_of(work, struct generic_pm_domain, power_off_work);
++
++      if (genpd->parent)
++              mutex_lock(&genpd->parent->lock);
++      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++      pm_genpd_poweroff(genpd);
++      mutex_unlock(&genpd->lock);
++      if (genpd->parent)
++              mutex_unlock(&genpd->parent->lock);
++}
++
++/**
++ * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
++ * @dev: Device to suspend.
++ *
++ * Carry out a runtime suspend of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
++ */
++static int pm_genpd_runtime_suspend(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      if (IS_ERR_OR_NULL(dev->pm_domain))
++              return -EINVAL;
++
++      genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain);
++
++      if (genpd->parent)
++              mutex_lock(&genpd->parent->lock);
++      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++
++      if (genpd->stop_device) {
++              int ret = genpd->stop_device(dev);
++              if (ret)
++                      goto out;
++      }
++      genpd->in_progress++;
++      pm_genpd_poweroff(genpd);
++      genpd->in_progress--;
++
++ out:
++      mutex_unlock(&genpd->lock);
++      if (genpd->parent)
++              mutex_unlock(&genpd->parent->lock);
++
++      return 0;
++}
++
++/**
++ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
++ * @genpd: PM domain to power up.
++ *
++ * Restore power to @genpd and all of its parents so that it is possible to
++ * resume a device belonging to it.
++ */
++static int pm_genpd_poweron(struct generic_pm_domain *genpd)
++{
++      int ret = 0;
++
++ start:
++      if (genpd->parent)
++              mutex_lock(&genpd->parent->lock);
++      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++
++      if (!genpd->power_is_off)
++              goto out;
++
++      if (genpd->parent && genpd->parent->power_is_off) {
++              mutex_unlock(&genpd->lock);
++              mutex_unlock(&genpd->parent->lock);
++
++              ret = pm_genpd_poweron(genpd->parent);
++              if (ret)
++                      return ret;
++
++              goto start;
++      }
++
++      if (genpd->power_on) {
++              int ret = genpd->power_on(genpd);
++              if (ret)
++                      goto out;
++      }
++
++      genpd->power_is_off = false;
++      if (genpd->parent)
++              genpd->parent->sd_count++;
++
++ out:
++      mutex_unlock(&genpd->lock);
++      if (genpd->parent)
++              mutex_unlock(&genpd->parent->lock);
++
++      return ret;
++}
++
++/**
++ * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
++ * @dev: Device to resume.
++ *
++ * Carry out a runtime resume of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
++ */
++static int pm_genpd_runtime_resume(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++      struct dev_list_entry *dle;
++      int ret;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      if (IS_ERR_OR_NULL(dev->pm_domain))
++              return -EINVAL;
++
++      genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain);
++
++      ret = pm_genpd_poweron(genpd);
++      if (ret)
++              return ret;
++
++      mutex_lock(&genpd->lock);
++
++      list_for_each_entry(dle, &genpd->dev_list, node) {
++              if (dle->dev == dev) {
++                      __pm_genpd_restore_device(dle, genpd);
++                      break;
++              }
++      }
++
++      if (genpd->start_device)
++              genpd->start_device(dev);
++
++      mutex_unlock(&genpd->lock);
++
++      return 0;
++}
++
++#else
++
++static inline void genpd_power_off_work_fn(struct work_struct *work) {}
++
++#define pm_genpd_runtime_suspend      NULL
++#define pm_genpd_runtime_resume               NULL
++
++#endif /* CONFIG_PM_RUNTIME */
++
++/**
++ * pm_genpd_add_device - Add a device to an I/O PM domain.
++ * @genpd: PM domain to add the device to.
++ * @dev: Device to be added.
++ */
++int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
++{
++      struct dev_list_entry *dle;
++      int ret = 0;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
++              return -EINVAL;
++
++      mutex_lock(&genpd->lock);
++
++      if (genpd->power_is_off) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      list_for_each_entry(dle, &genpd->dev_list, node)
++              if (dle->dev == dev) {
++                      ret = -EINVAL;
++                      goto out;
++              }
++
++      dle = kzalloc(sizeof(*dle), GFP_KERNEL);
++      if (!dle) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      dle->dev = dev;
++      dle->need_restore = false;
++      list_add_tail(&dle->node, &genpd->dev_list);
++
++      spin_lock_irq(&dev->power.lock);
++      dev->pm_domain = &genpd->domain;
++      spin_unlock_irq(&dev->power.lock);
++
++ out:
++      mutex_unlock(&genpd->lock);
++
++      return ret;
++}
++
++/**
++ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
++ * @genpd: PM domain to remove the device from.
++ * @dev: Device to be removed.
++ */
++int pm_genpd_remove_device(struct generic_pm_domain *genpd,
++                         struct device *dev)
++{
++      struct dev_list_entry *dle;
++      int ret = -EINVAL;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
++              return -EINVAL;
++
++      mutex_lock(&genpd->lock);
++
++      list_for_each_entry(dle, &genpd->dev_list, node) {
++              if (dle->dev != dev)
++                      continue;
++
++              spin_lock_irq(&dev->power.lock);
++              dev->pm_domain = NULL;
++              spin_unlock_irq(&dev->power.lock);
++
++              list_del(&dle->node);
++              kfree(dle);
++
++              ret = 0;
++              break;
++      }
++
++      mutex_unlock(&genpd->lock);
++
++      return ret;
++}
++
++/**
++ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
++ * @genpd: Master PM domain to add the subdomain to.
++ * @new_subdomain: Subdomain to be added.
++ */
++int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
++                         struct generic_pm_domain *new_subdomain)
++{
++      struct generic_pm_domain *subdomain;
++      int ret = 0;
++
++      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
++              return -EINVAL;
++
++      mutex_lock(&genpd->lock);
++
++      if (genpd->power_is_off && !new_subdomain->power_is_off) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
++              if (subdomain == new_subdomain) {
++                      ret = -EINVAL;
++                      goto out;
++              }
++      }
++
++      mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
++
++      list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
++      new_subdomain->parent = genpd;
++      if (!subdomain->power_is_off)
++              genpd->sd_count++;
++
++      mutex_unlock(&new_subdomain->lock);
++
++ out:
++      mutex_unlock(&genpd->lock);
++
++      return ret;
++}
++
++/**
++ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
++ * @genpd: Master PM domain to remove the subdomain from.
++ * @target: Subdomain to be removed.
++ */
++int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
++                            struct generic_pm_domain *target)
++{
++      struct generic_pm_domain *subdomain;
++      int ret = -EINVAL;
++
++      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
++              return -EINVAL;
++
++      mutex_lock(&genpd->lock);
++
++      list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
++              if (subdomain != target)
++                      continue;
++
++              mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
++
++              list_del(&subdomain->sd_node);
++              subdomain->parent = NULL;
++              if (!subdomain->power_is_off)
++                      genpd_sd_counter_dec(genpd);
++
++              mutex_unlock(&subdomain->lock);
++
++              ret = 0;
++              break;
++      }
++
++      mutex_unlock(&genpd->lock);
++
++      return ret;
++}
++
++/**
++ * pm_genpd_init - Initialize a generic I/O PM domain object.
++ * @genpd: PM domain object to initialize.
++ * @gov: PM domain governor to associate with the domain (may be NULL).
++ * @is_off: Initial value of the domain's power_is_off field.
++ */
++void pm_genpd_init(struct generic_pm_domain *genpd,
++                 struct dev_power_governor *gov, bool is_off)
++{
++      if (IS_ERR_OR_NULL(genpd))
++              return;
++
++      INIT_LIST_HEAD(&genpd->sd_node);
++      genpd->parent = NULL;
++      INIT_LIST_HEAD(&genpd->dev_list);
++      INIT_LIST_HEAD(&genpd->sd_list);
++      mutex_init(&genpd->lock);
++      genpd->gov = gov;
++      INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
++      genpd->in_progress = 0;
++      genpd->sd_count = 0;
++      genpd->power_is_off = is_off;
++      genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
++      genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
++      genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
++}
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+new file mode 100644
+index 0000000..b1a22c6
+--- /dev/null
++++ b/include/linux/pm_domain.h
+@@ -0,0 +1,78 @@
++/*
++ * pm_domain.h - Definitions and headers related to device power domains.
++ *
++ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
++ *
++ * This file is released under the GPLv2.
++ */
++
++#ifndef _LINUX_PM_DOMAIN_H
++#define _LINUX_PM_DOMAIN_H
++
++#include <linux/device.h>
++
++struct dev_power_governor {
++      bool (*power_down_ok)(struct dev_pm_domain *domain);
++};
++
++struct generic_pm_domain {
++      struct dev_pm_domain domain;    /* PM domain operations */
++      struct list_head sd_node;       /* Node in the parent's subdomain list */
++      struct generic_pm_domain *parent;       /* Parent PM domain */
++      struct list_head sd_list;       /* List of dubdomains */
++      struct list_head dev_list;      /* List of devices */
++      struct mutex lock;
++      struct dev_power_governor *gov;
++      struct work_struct power_off_work;
++      unsigned int in_progress;       /* Number of devices being suspended now */
++      unsigned int sd_count;  /* Number of subdomains with power "on" */
++      bool power_is_off;      /* Whether or not power has been removed */
++      int (*power_off)(struct generic_pm_domain *domain);
++      int (*power_on)(struct generic_pm_domain *domain);
++      int (*start_device)(struct device *dev);
++      int (*stop_device)(struct device *dev);
++};
++
++struct dev_list_entry {
++      struct list_head node;
++      struct device *dev;
++      bool need_restore;
++};
++
++#ifdef CONFIG_PM_GENERIC_DOMAINS
++extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
++                             struct device *dev);
++extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
++                                struct device *dev);
++extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
++                                struct generic_pm_domain *new_subdomain);
++extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
++                                   struct generic_pm_domain *target);
++extern void pm_genpd_init(struct generic_pm_domain *genpd,
++                        struct dev_power_governor *gov, bool is_off);
++#else
++static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
++                                    struct device *dev)
++{
++      return -ENOSYS;
++}
++static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
++                                       struct device *dev)
++{
++      return -ENOSYS;
++}
++static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
++                                       struct generic_pm_domain *new_sd)
++{
++      return -ENOSYS;
++}
++static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
++                                          struct generic_pm_domain *target)
++{
++      return -ENOSYS;
++}
++static inline void pm_genpd_init(struct generic_pm_domain *genpd,
++                               struct dev_power_governor *gov, bool is_off) {}
++#endif
++
++#endif /* _LINUX_PM_DOMAIN_H */
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index 87f4d24..e83ac25 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -227,3 +227,7 @@ config PM_OPP
+ config PM_RUNTIME_CLK
+       def_bool y
+       depends on PM_RUNTIME && HAVE_CLK
++
++config PM_GENERIC_DOMAINS
++      bool
++      depends on PM
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0005-PM-Introduce-generic-noirq-callback-routines-for-sub.patch b/patches.runtime_pm/0005-PM-Introduce-generic-noirq-callback-routines-for-sub.patch
new file mode 100644 (file)
index 0000000..57e5966
--- /dev/null
@@ -0,0 +1,340 @@
+From 1e0186440adf4fb452073ff23de4e99b06f07fdc Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:12:59 +0200
+Subject: PM: Introduce generic "noirq" callback routines for subsystems (v2)
+
+Introduce generic "noirq" power management callback routines for
+subsystems in addition to the "regular" generic PM callback routines.
+
+The new routines will be used, among other things, for implementing
+system-wide PM transitions support for generic PM domains.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e5291928839877f8e73c2643ee1d3fe0bcdcaf5c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   32 +++++++++++-
+ drivers/base/power/generic_ops.c   |   98 ++++++++++++++++++++++++++++++------
+ include/linux/pm.h                 |    6 +++
+ 3 files changed, 119 insertions(+), 17 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index b24875b..4b011b1 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -606,32 +606,60 @@ driver/base/power/generic_ops.c:
+       callback provided by its driver and return its result, or return 0 if not
+       defined
++  int pm_generic_suspend_noirq(struct device *dev);
++    - if pm_runtime_suspended(dev) returns "false", invoke the ->suspend_noirq()
++      callback provided by the device's driver and return its result, or return
++      0 if not defined
++
+   int pm_generic_resume(struct device *dev);
+     - invoke the ->resume() callback provided by the driver of this device and,
+       if successful, change the device's runtime PM status to 'active'
++  int pm_generic_resume_noirq(struct device *dev);
++    - invoke the ->resume_noirq() callback provided by the driver of this device
++
+   int pm_generic_freeze(struct device *dev);
+     - if the device has not been suspended at run time, invoke the ->freeze()
+       callback provided by its driver and return its result, or return 0 if not
+       defined
++  int pm_generic_freeze_noirq(struct device *dev);
++    - if pm_runtime_suspended(dev) returns "false", invoke the ->freeze_noirq()
++      callback provided by the device's driver and return its result, or return
++      0 if not defined
++
+   int pm_generic_thaw(struct device *dev);
+     - if the device has not been suspended at run time, invoke the ->thaw()
+       callback provided by its driver and return its result, or return 0 if not
+       defined
++  int pm_generic_thaw_noirq(struct device *dev);
++    - if pm_runtime_suspended(dev) returns "false", invoke the ->thaw_noirq()
++      callback provided by the device's driver and return its result, or return
++      0 if not defined
++
+   int pm_generic_poweroff(struct device *dev);
+     - if the device has not been suspended at run time, invoke the ->poweroff()
+       callback provided by its driver and return its result, or return 0 if not
+       defined
++  int pm_generic_poweroff_noirq(struct device *dev);
++    - if pm_runtime_suspended(dev) returns "false", run the ->poweroff_noirq()
++      callback provided by the device's driver and return its result, or return
++      0 if not defined
++
+   int pm_generic_restore(struct device *dev);
+     - invoke the ->restore() callback provided by the driver of this device and,
+       if successful, change the device's runtime PM status to 'active'
++  int pm_generic_restore_noirq(struct device *dev);
++    - invoke the ->restore_noirq() callback provided by the device's driver
++
+ These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(),
+-->runtime_resume(), ->suspend(), ->resume(), ->freeze(), ->thaw(), ->poweroff(),
+-or ->restore() callback pointers in the subsystem-level dev_pm_ops structures.
++->runtime_resume(), ->suspend(), ->suspend_noirq(), ->resume(),
++->resume_noirq(), ->freeze(), ->freeze_noirq(), ->thaw(), ->thaw_noirq(),
++->poweroff(), ->poweroff_noirq(), ->restore(), ->restore_noirq() callback
++pointers in the subsystem-level dev_pm_ops structures.
+ If a subsystem wishes to use all of them at the same time, it can simply assign
+ the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its
+diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
+index cb3bb36..9508df7 100644
+--- a/drivers/base/power/generic_ops.c
++++ b/drivers/base/power/generic_ops.c
+@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
+  * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
+  * @dev: Device to handle.
+  * @event: PM transition of the system under way.
++ * @bool: Whether or not this is the "noirq" stage.
+  *
+  * If the device has not been suspended at run time, execute the
+  * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
+  * return its error code.  Otherwise, return zero.
+  */
+-static int __pm_generic_call(struct device *dev, int event)
++static int __pm_generic_call(struct device *dev, int event, bool noirq)
+ {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*callback)(struct device *);
+@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
+       switch (event) {
+       case PM_EVENT_SUSPEND:
+-              callback = pm->suspend;
++              callback = noirq ? pm->suspend_noirq : pm->suspend;
+               break;
+       case PM_EVENT_FREEZE:
+-              callback = pm->freeze;
++              callback = noirq ? pm->freeze_noirq : pm->freeze;
+               break;
+       case PM_EVENT_HIBERNATE:
+-              callback = pm->poweroff;
++              callback = noirq ? pm->poweroff_noirq : pm->poweroff;
+               break;
+       case PM_EVENT_THAW:
+-              callback = pm->thaw;
++              callback = noirq ? pm->thaw_noirq : pm->thaw;
+               break;
+       default:
+               callback = NULL;
+@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
+ }
+ /**
++ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
++ * @dev: Device to suspend.
++ */
++int pm_generic_suspend_noirq(struct device *dev)
++{
++      return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
++}
++EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
++
++/**
+  * pm_generic_suspend - Generic suspend callback for subsystems.
+  * @dev: Device to suspend.
+  */
+ int pm_generic_suspend(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_SUSPEND);
++      return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_suspend);
+ /**
++ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
++ * @dev: Device to freeze.
++ */
++int pm_generic_freeze_noirq(struct device *dev)
++{
++      return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
++}
++EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
++
++/**
+  * pm_generic_freeze - Generic freeze callback for subsystems.
+  * @dev: Device to freeze.
+  */
+ int pm_generic_freeze(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_FREEZE);
++      return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_freeze);
+ /**
++ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
++ * @dev: Device to handle.
++ */
++int pm_generic_poweroff_noirq(struct device *dev)
++{
++      return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
++}
++EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
++
++/**
+  * pm_generic_poweroff - Generic poweroff callback for subsystems.
+  * @dev: Device to handle.
+  */
+ int pm_generic_poweroff(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
++      return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+ /**
++ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
++ * @dev: Device to thaw.
++ */
++int pm_generic_thaw_noirq(struct device *dev)
++{
++      return __pm_generic_call(dev, PM_EVENT_THAW, true);
++}
++EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
++
++/**
+  * pm_generic_thaw - Generic thaw callback for subsystems.
+  * @dev: Device to thaw.
+  */
+ int pm_generic_thaw(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_THAW);
++      return __pm_generic_call(dev, PM_EVENT_THAW, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_thaw);
+@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
+  * __pm_generic_resume - Generic resume/restore callback for subsystems.
+  * @dev: Device to handle.
+  * @event: PM transition of the system under way.
++ * @bool: Whether or not this is the "noirq" stage.
+  *
+  * Execute the resume/resotre callback provided by the @dev's driver, if
+  * defined.  If it returns 0, change the device's runtime PM status to 'active'.
+  * Return the callback's error code.
+  */
+-static int __pm_generic_resume(struct device *dev, int event)
++static int __pm_generic_resume(struct device *dev, int event, bool noirq)
+ {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*callback)(struct device *);
+@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
+       switch (event) {
+       case PM_EVENT_RESUME:
+-              callback = pm->resume;
++              callback = noirq ? pm->resume_noirq : pm->resume;
+               break;
+       case PM_EVENT_RESTORE:
+-              callback = pm->restore;
++              callback = noirq ? pm->restore_noirq : pm->restore;
+               break;
+       default:
+               callback = NULL;
+@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
+               return 0;
+       ret = callback(dev);
+-      if (!ret && pm_runtime_enabled(dev)) {
++      if (!ret && !noirq && pm_runtime_enabled(dev)) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
+ }
+ /**
++ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
++ * @dev: Device to resume.
++ */
++int pm_generic_resume_noirq(struct device *dev)
++{
++      return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
++}
++EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
++
++/**
+  * pm_generic_resume - Generic resume callback for subsystems.
+  * @dev: Device to resume.
+  */
+ int pm_generic_resume(struct device *dev)
+ {
+-      return __pm_generic_resume(dev, PM_EVENT_RESUME);
++      return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_resume);
+ /**
++ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
++ * @dev: Device to restore.
++ */
++int pm_generic_restore_noirq(struct device *dev)
++{
++      return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
++}
++EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
++
++/**
+  * pm_generic_restore - Generic restore callback for subsystems.
+  * @dev: Device to restore.
+  */
+ int pm_generic_restore(struct device *dev)
+ {
+-      return __pm_generic_resume(dev, PM_EVENT_RESTORE);
++      return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_restore);
+@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
+ #ifdef CONFIG_PM_SLEEP
+       .prepare = pm_generic_prepare,
+       .suspend = pm_generic_suspend,
++      .suspend_noirq = pm_generic_suspend_noirq,
+       .resume = pm_generic_resume,
++      .resume_noirq = pm_generic_resume_noirq,
+       .freeze = pm_generic_freeze,
++      .freeze_noirq = pm_generic_freeze_noirq,
+       .thaw = pm_generic_thaw,
++      .thaw_noirq = pm_generic_thaw_noirq,
+       .poweroff = pm_generic_poweroff,
++      .poweroff_noirq = pm_generic_poweroff_noirq,
+       .restore = pm_generic_restore,
++      .restore_noirq = pm_generic_restore_noirq,
+       .complete = pm_generic_complete,
+ #endif
+ #ifdef CONFIG_PM_RUNTIME
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 7e8f076..f7c84c9 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -553,11 +553,17 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
+ extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
+ extern int pm_generic_prepare(struct device *dev);
++extern int pm_generic_suspend_noirq(struct device *dev);
+ extern int pm_generic_suspend(struct device *dev);
++extern int pm_generic_resume_noirq(struct device *dev);
+ extern int pm_generic_resume(struct device *dev);
++extern int pm_generic_freeze_noirq(struct device *dev);
+ extern int pm_generic_freeze(struct device *dev);
++extern int pm_generic_thaw_noirq(struct device *dev);
+ extern int pm_generic_thaw(struct device *dev);
++extern int pm_generic_restore_noirq(struct device *dev);
+ extern int pm_generic_restore(struct device *dev);
++extern int pm_generic_poweroff_noirq(struct device *dev);
+ extern int pm_generic_poweroff(struct device *dev);
+ extern void pm_generic_complete(struct device *dev);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0006-PM-Domains-Move-code-from-under-ifdef-CONFIG_PM_RUNT.patch b/patches.runtime_pm/0006-PM-Domains-Move-code-from-under-ifdef-CONFIG_PM_RUNT.patch
new file mode 100644 (file)
index 0000000..372f7df
--- /dev/null
@@ -0,0 +1,184 @@
+From 5eb38f7619fb43ea0097cefa9dbf005f18cc42a1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:13:10 +0200
+Subject: PM / Domains: Move code from under #ifdef CONFIG_PM_RUNTIME (v2)
+
+There is some code in drivers/base/power/domain.c that will be useful
+for both runtime PM and system-wide power transitions, so make it
+depend on CONFIG_PM instead of CONFIG_PM_RUNTIME.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 5248051b9afb6684cd817b2fbdaefa5063761dab)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  120 +++++++++++++++++++++++--------------------
+ 1 file changed, 65 insertions(+), 55 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index fd31be3..f14ba32 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -14,7 +14,15 @@
+ #include <linux/slab.h>
+ #include <linux/err.h>
+-#ifdef CONFIG_PM_RUNTIME
++#ifdef CONFIG_PM
++
++static struct generic_pm_domain *dev_to_genpd(struct device *dev)
++{
++      if (IS_ERR_OR_NULL(dev->pm_domain))
++              return ERR_PTR(-EINVAL);
++
++      return container_of(dev->pm_domain, struct generic_pm_domain, domain);
++}
+ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+ {
+@@ -23,6 +31,58 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+ }
+ /**
++ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
++ * @genpd: PM domain to power up.
++ *
++ * Restore power to @genpd and all of its parents so that it is possible to
++ * resume a device belonging to it.
++ */
++static int pm_genpd_poweron(struct generic_pm_domain *genpd)
++{
++      int ret = 0;
++
++ start:
++      if (genpd->parent)
++              mutex_lock(&genpd->parent->lock);
++      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++
++      if (!genpd->power_is_off)
++              goto out;
++
++      if (genpd->parent && genpd->parent->power_is_off) {
++              mutex_unlock(&genpd->lock);
++              mutex_unlock(&genpd->parent->lock);
++
++              ret = pm_genpd_poweron(genpd->parent);
++              if (ret)
++                      return ret;
++
++              goto start;
++      }
++
++      if (genpd->power_on) {
++              int ret = genpd->power_on(genpd);
++              if (ret)
++                      goto out;
++      }
++
++      genpd->power_is_off = false;
++      if (genpd->parent)
++              genpd->parent->sd_count++;
++
++ out:
++      mutex_unlock(&genpd->lock);
++      if (genpd->parent)
++              mutex_unlock(&genpd->parent->lock);
++
++      return ret;
++}
++
++#endif /* CONFIG_PM */
++
++#ifdef CONFIG_PM_RUNTIME
++
++/**
+  * __pm_genpd_save_device - Save the pre-suspend state of a device.
+  * @dle: Device list entry of the device to save the state of.
+  * @genpd: PM domain the device belongs to.
+@@ -174,11 +234,10 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+       dev_dbg(dev, "%s()\n", __func__);
+-      if (IS_ERR_OR_NULL(dev->pm_domain))
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
+               return -EINVAL;
+-      genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain);
+-
+       if (genpd->parent)
+               mutex_lock(&genpd->parent->lock);
+       mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+@@ -201,54 +260,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+ }
+ /**
+- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+- * @genpd: PM domain to power up.
+- *
+- * Restore power to @genpd and all of its parents so that it is possible to
+- * resume a device belonging to it.
+- */
+-static int pm_genpd_poweron(struct generic_pm_domain *genpd)
+-{
+-      int ret = 0;
+-
+- start:
+-      if (genpd->parent)
+-              mutex_lock(&genpd->parent->lock);
+-      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-
+-      if (!genpd->power_is_off)
+-              goto out;
+-
+-      if (genpd->parent && genpd->parent->power_is_off) {
+-              mutex_unlock(&genpd->lock);
+-              mutex_unlock(&genpd->parent->lock);
+-
+-              ret = pm_genpd_poweron(genpd->parent);
+-              if (ret)
+-                      return ret;
+-
+-              goto start;
+-      }
+-
+-      if (genpd->power_on) {
+-              int ret = genpd->power_on(genpd);
+-              if (ret)
+-                      goto out;
+-      }
+-
+-      genpd->power_is_off = false;
+-      if (genpd->parent)
+-              genpd->parent->sd_count++;
+-
+- out:
+-      mutex_unlock(&genpd->lock);
+-      if (genpd->parent)
+-              mutex_unlock(&genpd->parent->lock);
+-
+-      return ret;
+-}
+-
+-/**
+  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+  * @dev: Device to resume.
+  *
+@@ -264,11 +275,10 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       dev_dbg(dev, "%s()\n", __func__);
+-      if (IS_ERR_OR_NULL(dev->pm_domain))
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
+               return -EINVAL;
+-      genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain);
+-
+       ret = pm_genpd_poweron(genpd);
+       if (ret)
+               return ret;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0007-PM-Domains-System-wide-transitions-support-for-gener.patch b/patches.runtime_pm/0007-PM-Domains-System-wide-transitions-support-for-gener.patch
new file mode 100644 (file)
index 0000000..9487f1d
--- /dev/null
@@ -0,0 +1,735 @@
+From e09ec39629e6f819c4828081038af88e531664cd Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:13:19 +0200
+Subject: PM / Domains: System-wide transitions support for generic domains
+ (v5)
+
+Make generic PM domains support system-wide power transitions
+(system suspend and hibernation).  Add suspend, resume, freeze, thaw,
+poweroff and restore callbacks to be associated with struct
+generic_pm_domain objects and make pm_genpd_init() use them as
+appropriate.
+
+The new callbacks do nothing for devices belonging to power domains
+that were powered down at run time (before the transition).  For the
+other devices the action carried out depends on the type of the
+transition.  During system suspend the power domain .suspend()
+callback executes pm_generic_suspend() for the device, while the
+PM domain .suspend_noirq() callback runs pm_generic_suspend_noirq()
+for it, stops it and eventually removes power from the PM domain it
+belongs to (after all devices in the domain have been stopped and its
+subdomains have been powered off).
+
+During system resume the PM domain .resume_noirq() callback
+restores power to the PM domain (when executed for it first time),
+starts the device and executes pm_generic_resume_noirq() for it,
+while the .resume() callback executes pm_generic_resume() for the
+device.  Finally, the .complete() callback executes pm_runtime_idle()
+for the device which should put it back into the suspended state if
+its runtime PM usage count is equal to zero at that time.
+
+The actions carried out during hibernation and resume from it are
+analogous to the ones described above.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 596ba34bcd2978ee9823cc1d84df230576f8ffb9)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  551 +++++++++++++++++++++++++++++++++++++++++--
+ include/linux/pm_domain.h   |   12 +
+ 2 files changed, 548 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index f14ba32..33086e9 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -21,7 +21,7 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+       if (IS_ERR_OR_NULL(dev->pm_domain))
+               return ERR_PTR(-EINVAL);
+-      return container_of(dev->pm_domain, struct generic_pm_domain, domain);
++      return pd_to_genpd(dev->pm_domain);
+ }
+ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+@@ -46,7 +46,8 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd)
+               mutex_lock(&genpd->parent->lock);
+       mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-      if (!genpd->power_is_off)
++      if (!genpd->power_is_off
++          || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+               goto out;
+       if (genpd->parent && genpd->parent->power_is_off) {
+@@ -155,7 +156,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       unsigned int not_suspended;
+       int ret;
+-      if (genpd->power_is_off)
++      if (genpd->power_is_off || genpd->prepared_count > 0)
+               return 0;
+       if (genpd->sd_count > 0)
+@@ -260,6 +261,27 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+ }
+ /**
++ * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
++ * @dev: Device to resume.
++ * @genpd: PM domain the device belongs to.
++ */
++static void __pm_genpd_runtime_resume(struct device *dev,
++                                    struct generic_pm_domain *genpd)
++{
++      struct dev_list_entry *dle;
++
++      list_for_each_entry(dle, &genpd->dev_list, node) {
++              if (dle->dev == dev) {
++                      __pm_genpd_restore_device(dle, genpd);
++                      break;
++              }
++      }
++
++      if (genpd->start_device)
++              genpd->start_device(dev);
++}
++
++/**
+  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+  * @dev: Device to resume.
+  *
+@@ -270,7 +292,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+ static int pm_genpd_runtime_resume(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
+-      struct dev_list_entry *dle;
+       int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -284,17 +305,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
+               return ret;
+       mutex_lock(&genpd->lock);
+-
+-      list_for_each_entry(dle, &genpd->dev_list, node) {
+-              if (dle->dev == dev) {
+-                      __pm_genpd_restore_device(dle, genpd);
+-                      break;
+-              }
+-      }
+-
+-      if (genpd->start_device)
+-              genpd->start_device(dev);
+-
++      __pm_genpd_runtime_resume(dev, genpd);
+       mutex_unlock(&genpd->lock);
+       return 0;
+@@ -303,12 +314,493 @@ static int pm_genpd_runtime_resume(struct device *dev)
+ #else
+ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
++static inline void __pm_genpd_runtime_resume(struct device *dev,
++                                           struct generic_pm_domain *genpd) {}
+ #define pm_genpd_runtime_suspend      NULL
+ #define pm_genpd_runtime_resume               NULL
+ #endif /* CONFIG_PM_RUNTIME */
++#ifdef CONFIG_PM_SLEEP
++
++/**
++ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
++ * @genpd: PM domain to power off, if possible.
++ *
++ * Check if the given PM domain can be powered off (during system suspend or
++ * hibernation) and do that if so.  Also, in that case propagate to its parent.
++ *
++ * This function is only called in "noirq" stages of system power transitions,
++ * so it need not acquire locks (all of the "noirq" callbacks are executed
++ * sequentially, so it is guaranteed that it will never run twice in parallel).
++ */
++static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
++{
++      struct generic_pm_domain *parent = genpd->parent;
++
++      if (genpd->power_is_off)
++              return;
++
++      if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
++              return;
++
++      if (genpd->power_off)
++              genpd->power_off(genpd);
++
++      genpd->power_is_off = true;
++      if (parent) {
++              genpd_sd_counter_dec(parent);
++              pm_genpd_sync_poweroff(parent);
++      }
++}
++
++/**
++ * pm_genpd_prepare - Start power transition of a device in a PM domain.
++ * @dev: Device to start the transition of.
++ *
++ * Start a power transition of a device (during a system-wide power transition)
++ * under the assumption that its pm_domain field points to the domain member of
++ * an object of type struct generic_pm_domain representing a PM domain
++ * consisting of I/O devices.
++ */
++static int pm_genpd_prepare(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      mutex_lock(&genpd->lock);
++
++      if (genpd->prepared_count++ == 0)
++              genpd->suspend_power_off = genpd->power_is_off;
++
++      if (genpd->suspend_power_off) {
++              mutex_unlock(&genpd->lock);
++              return 0;
++      }
++
++      /*
++       * If the device is in the (runtime) "suspended" state, call
++       * .start_device() for it, if defined.
++       */
++      if (pm_runtime_suspended(dev))
++              __pm_genpd_runtime_resume(dev, genpd);
++
++      /*
++       * Do not check if runtime resume is pending at this point, because it
++       * has been taken care of already and if pm_genpd_poweron() ran at this
++       * point as a result of the check, it would deadlock.
++       */
++      __pm_runtime_disable(dev, false);
++
++      mutex_unlock(&genpd->lock);
++
++      return pm_generic_prepare(dev);
++}
++
++/**
++ * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
++ * @dev: Device to suspend.
++ *
++ * Suspend a device under the assumption that its pm_domain field points to the
++ * domain member of an object of type struct generic_pm_domain representing
++ * a PM domain consisting of I/O devices.
++ */
++static int pm_genpd_suspend(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
++}
++
++/**
++ * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
++ * @dev: Device to suspend.
++ *
++ * Carry out a late suspend of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
++ */
++static int pm_genpd_suspend_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++      int ret;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      if (genpd->suspend_power_off)
++              return 0;
++
++      ret = pm_generic_suspend_noirq(dev);
++      if (ret)
++              return ret;
++
++      if (genpd->stop_device)
++              genpd->stop_device(dev);
++
++      /*
++       * Since all of the "noirq" callbacks are executed sequentially, it is
++       * guaranteed that this function will never run twice in parallel for
++       * the same PM domain, so it is not necessary to use locking here.
++       */
++      genpd->suspended_count++;
++      pm_genpd_sync_poweroff(genpd);
++
++      return 0;
++}
++
++/**
++ * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
++ * @dev: Device to resume.
++ *
++ * Carry out an early resume of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a power domain consisting of I/O
++ * devices.
++ */
++static int pm_genpd_resume_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      if (genpd->suspend_power_off)
++              return 0;
++
++      /*
++       * Since all of the "noirq" callbacks are executed sequentially, it is
++       * guaranteed that this function will never run twice in parallel for
++       * the same PM domain, so it is not necessary to use locking here.
++       */
++      pm_genpd_poweron(genpd);
++      genpd->suspended_count--;
++      if (genpd->start_device)
++              genpd->start_device(dev);
++
++      return pm_generic_resume_noirq(dev);
++}
++
++/**
++ * pm_genpd_resume - Resume a device belonging to an I/O power domain.
++ * @dev: Device to resume.
++ *
++ * Resume a device under the assumption that its pm_domain field points to the
++ * domain member of an object of type struct generic_pm_domain representing
++ * a power domain consisting of I/O devices.
++ */
++static int pm_genpd_resume(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
++}
++
++/**
++ * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
++ * @dev: Device to freeze.
++ *
++ * Freeze a device under the assumption that its pm_domain field points to the
++ * domain member of an object of type struct generic_pm_domain representing
++ * a power domain consisting of I/O devices.
++ */
++static int pm_genpd_freeze(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
++}
++
++/**
++ * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
++ * @dev: Device to freeze.
++ *
++ * Carry out a late freeze of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a power domain consisting of I/O
++ * devices.
++ */
++static int pm_genpd_freeze_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++      int ret;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      if (genpd->suspend_power_off)
++              return 0;
++
++      ret = pm_generic_freeze_noirq(dev);
++      if (ret)
++              return ret;
++
++      if (genpd->stop_device)
++              genpd->stop_device(dev);
++
++      return 0;
++}
++
++/**
++ * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
++ * @dev: Device to thaw.
++ *
++ * Carry out an early thaw of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a power domain consisting of I/O
++ * devices.
++ */
++static int pm_genpd_thaw_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      if (genpd->suspend_power_off)
++              return 0;
++
++      if (genpd->start_device)
++              genpd->start_device(dev);
++
++      return pm_generic_thaw_noirq(dev);
++}
++
++/**
++ * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
++ * @dev: Device to thaw.
++ *
++ * Thaw a device under the assumption that its pm_domain field points to the
++ * domain member of an object of type struct generic_pm_domain representing
++ * a power domain consisting of I/O devices.
++ */
++static int pm_genpd_thaw(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
++}
++
++/**
++ * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
++ * @dev: Device to suspend.
++ *
++ * Power off a device under the assumption that its pm_domain field points to
++ * the domain member of an object of type struct generic_pm_domain representing
++ * a PM domain consisting of I/O devices.
++ */
++static int pm_genpd_dev_poweroff(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
++}
++
++/**
++ * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
++ * @dev: Device to suspend.
++ *
++ * Carry out a late powering off of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
++ */
++static int pm_genpd_dev_poweroff_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++      int ret;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      if (genpd->suspend_power_off)
++              return 0;
++
++      ret = pm_generic_poweroff_noirq(dev);
++      if (ret)
++              return ret;
++
++      if (genpd->stop_device)
++              genpd->stop_device(dev);
++
++      /*
++       * Since all of the "noirq" callbacks are executed sequentially, it is
++       * guaranteed that this function will never run twice in parallel for
++       * the same PM domain, so it is not necessary to use locking here.
++       */
++      genpd->suspended_count++;
++      pm_genpd_sync_poweroff(genpd);
++
++      return 0;
++}
++
++/**
++ * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
++ * @dev: Device to resume.
++ *
++ * Carry out an early restore of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a power domain consisting of I/O
++ * devices.
++ */
++static int pm_genpd_restore_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      /*
++       * Since all of the "noirq" callbacks are executed sequentially, it is
++       * guaranteed that this function will never run twice in parallel for
++       * the same PM domain, so it is not necessary to use locking here.
++       */
++      genpd->power_is_off = true;
++      if (genpd->suspend_power_off) {
++              /*
++               * The boot kernel might put the domain into the power on state,
++               * so make sure it really is powered off.
++               */
++              if (genpd->power_off)
++                      genpd->power_off(genpd);
++              return 0;
++      }
++
++      pm_genpd_poweron(genpd);
++      genpd->suspended_count--;
++      if (genpd->start_device)
++              genpd->start_device(dev);
++
++      return pm_generic_restore_noirq(dev);
++}
++
++/**
++ * pm_genpd_restore - Restore a device belonging to an I/O power domain.
++ * @dev: Device to resume.
++ *
++ * Restore a device under the assumption that its pm_domain field points to the
++ * domain member of an object of type struct generic_pm_domain representing
++ * a power domain consisting of I/O devices.
++ */
++static int pm_genpd_restore(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
++}
++
++/**
++ * pm_genpd_complete - Complete power transition of a device in a power domain.
++ * @dev: Device to complete the transition of.
++ *
++ * Complete a power transition of a device (during a system-wide power
++ * transition) under the assumption that its pm_domain field points to the
++ * domain member of an object of type struct generic_pm_domain representing
++ * a power domain consisting of I/O devices.
++ */
++static void pm_genpd_complete(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++      bool run_complete;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return;
++
++      mutex_lock(&genpd->lock);
++
++      run_complete = !genpd->suspend_power_off;
++      if (--genpd->prepared_count == 0)
++              genpd->suspend_power_off = false;
++
++      mutex_unlock(&genpd->lock);
++
++      if (run_complete) {
++              pm_generic_complete(dev);
++              pm_runtime_enable(dev);
++      }
++}
++
++#else
++
++#define pm_genpd_prepare              NULL
++#define pm_genpd_suspend              NULL
++#define pm_genpd_suspend_noirq                NULL
++#define pm_genpd_resume_noirq         NULL
++#define pm_genpd_resume                       NULL
++#define pm_genpd_freeze                       NULL
++#define pm_genpd_freeze_noirq         NULL
++#define pm_genpd_thaw_noirq           NULL
++#define pm_genpd_thaw                 NULL
++#define pm_genpd_dev_poweroff_noirq   NULL
++#define pm_genpd_dev_poweroff         NULL
++#define pm_genpd_restore_noirq                NULL
++#define pm_genpd_restore              NULL
++#define pm_genpd_complete             NULL
++
++#endif /* CONFIG_PM_SLEEP */
++
+ /**
+  * pm_genpd_add_device - Add a device to an I/O PM domain.
+  * @genpd: PM domain to add the device to.
+@@ -331,6 +823,11 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+               goto out;
+       }
++      if (genpd->prepared_count > 0) {
++              ret = -EAGAIN;
++              goto out;
++      }
++
+       list_for_each_entry(dle, &genpd->dev_list, node)
+               if (dle->dev == dev) {
+                       ret = -EINVAL;
+@@ -346,6 +843,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+       dle->dev = dev;
+       dle->need_restore = false;
+       list_add_tail(&dle->node, &genpd->dev_list);
++      genpd->device_count++;
+       spin_lock_irq(&dev->power.lock);
+       dev->pm_domain = &genpd->domain;
+@@ -375,6 +873,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+       mutex_lock(&genpd->lock);
++      if (genpd->prepared_count > 0) {
++              ret = -EAGAIN;
++              goto out;
++      }
++
+       list_for_each_entry(dle, &genpd->dev_list, node) {
+               if (dle->dev != dev)
+                       continue;
+@@ -383,6 +886,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+               dev->pm_domain = NULL;
+               spin_unlock_irq(&dev->power.lock);
++              genpd->device_count--;
+               list_del(&dle->node);
+               kfree(dle);
+@@ -390,6 +894,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+               break;
+       }
++ out:
+       mutex_unlock(&genpd->lock);
+       return ret;
+@@ -498,7 +1003,23 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->in_progress = 0;
+       genpd->sd_count = 0;
+       genpd->power_is_off = is_off;
++      genpd->device_count = 0;
++      genpd->suspended_count = 0;
+       genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+       genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+       genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
++      genpd->domain.ops.prepare = pm_genpd_prepare;
++      genpd->domain.ops.suspend = pm_genpd_suspend;
++      genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
++      genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
++      genpd->domain.ops.resume = pm_genpd_resume;
++      genpd->domain.ops.freeze = pm_genpd_freeze;
++      genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
++      genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
++      genpd->domain.ops.thaw = pm_genpd_thaw;
++      genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
++      genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
++      genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
++      genpd->domain.ops.restore = pm_genpd_restore;
++      genpd->domain.ops.complete = pm_genpd_complete;
+ }
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index b1a22c6..7961b0d 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -11,6 +11,9 @@
+ #include <linux/device.h>
++#define GPD_IN_SUSPEND        1
++#define GPD_POWER_OFF 2
++
+ struct dev_power_governor {
+       bool (*power_down_ok)(struct dev_pm_domain *domain);
+ };
+@@ -27,12 +30,21 @@ struct generic_pm_domain {
+       unsigned int in_progress;       /* Number of devices being suspended now */
+       unsigned int sd_count;  /* Number of subdomains with power "on" */
+       bool power_is_off;      /* Whether or not power has been removed */
++      unsigned int device_count;      /* Number of devices */
++      unsigned int suspended_count;   /* System suspend device counter */
++      unsigned int prepared_count;    /* Suspend counter of prepared devices */
++      bool suspend_power_off; /* Power status before system suspend */
+       int (*power_off)(struct generic_pm_domain *domain);
+       int (*power_on)(struct generic_pm_domain *domain);
+       int (*start_device)(struct device *dev);
+       int (*stop_device)(struct device *dev);
+ };
++static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
++{
++      return container_of(pd, struct generic_pm_domain, domain);
++}
++
+ struct dev_list_entry {
+       struct list_head node;
+       struct device *dev;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0008-PM-Domains-Wakeup-devices-support-for-system-sleep-t.patch b/patches.runtime_pm/0008-PM-Domains-Wakeup-devices-support-for-system-sleep-t.patch
new file mode 100644 (file)
index 0000000..61f2ef9
--- /dev/null
@@ -0,0 +1,75 @@
+From 1330bb213d5359301a54b8a8bee21fdec208be57 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:13:29 +0200
+Subject: PM / Domains: Wakeup devices support for system sleep transitions
+
+There is the problem how to handle devices set up to wake up the
+system from sleep states during system-wide power transitions.
+In some cases, those devices can be turned off entirely, because the
+wakeup signals will be generated on their behalf anyway.  In some
+other cases, they will generate wakeup signals if their clocks are
+stopped, but only if power is not removed from them.  Finally, in
+some cases, they can only generate wakeup signals if power is not
+removed from them and their clocks are enabled.
+
+To allow platform-specific code to decide whether or not to put
+wakeup devices (and their PM domains) into low-power state during
+system-wide transitions, such as system suspend, introduce a new
+generic PM domain callback, .active_wakeup(), that will be used
+during the "noirq" phase of system suspend and hibernation (after
+image creation) to decide what to do with wakeup devices.
+Specifically, if this callback is present and returns "true", the
+generic PM domain code will not execute .stop_device() for the
+given wakeup device and its PM domain won't be powered off.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit d4f2d87a8b46c14c4307c690c92bd08229f66ecf)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    8 ++++++++
+ include/linux/pm_domain.h   |    1 +
+ 2 files changed, 9 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 33086e9..1aed94c 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -450,6 +450,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+       if (ret)
+               return ret;
++      if (device_may_wakeup(dev)
++          && genpd->active_wakeup && genpd->active_wakeup(dev))
++              return 0;
++
+       if (genpd->stop_device)
+               genpd->stop_device(dev);
+@@ -670,6 +674,10 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+       if (ret)
+               return ret;
++      if (device_may_wakeup(dev)
++          && genpd->active_wakeup && genpd->active_wakeup(dev))
++              return 0;
++
+       if (genpd->stop_device)
+               genpd->stop_device(dev);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 7961b0d..98491ee 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -38,6 +38,7 @@ struct generic_pm_domain {
+       int (*power_on)(struct generic_pm_domain *domain);
+       int (*start_device)(struct device *dev);
+       int (*stop_device)(struct device *dev);
++      bool (*active_wakeup)(struct device *dev);
+ };
+ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0009-PM-Allow-the-clocks-management-code-to-be-used-durin.patch b/patches.runtime_pm/0009-PM-Allow-the-clocks-management-code-to-be-used-durin.patch
new file mode 100644 (file)
index 0000000..bbd647f
--- /dev/null
@@ -0,0 +1,147 @@
+From 60fe6d8f2058c122a37b053523a76fc791ea3cef Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:13:37 +0200
+Subject: PM: Allow the clocks management code to be used during system
+ suspend
+
+The common clocks management code in drivers/base/power/clock_ops.c
+is going to be used during system-wide power transitions as well as
+for runtime PM, so it shouldn't depend on CONFIG_PM_RUNTIME.
+However, the suspend/resume functions provided by it for
+CONFIG_PM_RUNTIME unset, to be used during system-wide power
+transitions, should not behave in the same way as their counterparts
+defined for CONFIG_PM_RUNTIME set, because in that case the clocks
+are managed differently at run time.
+
+The names of the functions still contain the word "runtime" after
+this change, but that is going to be modified by a separate patch
+later.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit b7b95920aa2e89e655afe9913ee0e55855ceda90)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/clock_ops.c |   60 +++++++++++++++++++++++++++++++++++++++-
+ include/linux/pm_runtime.h     |    2 +-
+ kernel/power/Kconfig           |    4 +--
+ 3 files changed, 62 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index c562481..2fb9c12 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -15,7 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/err.h>
+-#ifdef CONFIG_PM_RUNTIME
++#ifdef CONFIG_PM
+ struct pm_runtime_clk_data {
+       struct list_head clock_list;
+@@ -191,6 +191,10 @@ void pm_runtime_clk_destroy(struct device *dev)
+       kfree(prd);
+ }
++#endif /* CONFIG_PM */
++
++#ifdef CONFIG_PM_RUNTIME
++
+ /**
+  * pm_runtime_clk_acquire - Acquire a device clock.
+  * @dev: Device whose clock is to be acquired.
+@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+ #else /* !CONFIG_PM_RUNTIME */
++#ifdef CONFIG_PM
++
++/**
++ * pm_runtime_clk_suspend - Disable clocks in a device's PM clock list.
++ * @dev: Device to disable the clocks for.
++ */
++int pm_runtime_clk_suspend(struct device *dev)
++{
++      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clock_entry *ce;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      /* If there is no driver, the clocks are already disabled. */
++      if (!prd || !dev->driver)
++              return 0;
++
++      mutex_lock(&prd->lock);
++
++      list_for_each_entry_reverse(ce, &prd->clock_list, node)
++              clk_disable(ce->clk);
++
++      mutex_unlock(&prd->lock);
++
++      return 0;
++}
++
++/**
++ * pm_runtime_clk_resume - Enable clocks in a device's PM clock list.
++ * @dev: Device to enable the clocks for.
++ */
++int pm_runtime_clk_resume(struct device *dev)
++{
++      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clock_entry *ce;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      /* If there is no driver, the clocks should remain disabled. */
++      if (!prd || !dev->driver)
++              return 0;
++
++      mutex_lock(&prd->lock);
++
++      list_for_each_entry(ce, &prd->clock_list, node)
++              clk_enable(ce->clk);
++
++      mutex_unlock(&prd->lock);
++
++      return 0;
++}
++
++#endif /* CONFIG_PM */
++
+ /**
+  * enable_clock - Enable a device clock.
+  * @dev: Device whose clock is to be enabled.
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index ef91904..1bd5063 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -251,7 +251,7 @@ struct pm_clk_notifier_block {
+       char *con_ids[];
+ };
+-#ifdef CONFIG_PM_RUNTIME_CLK
++#ifdef CONFIG_PM_CLK
+ extern int pm_runtime_clk_init(struct device *dev);
+ extern void pm_runtime_clk_destroy(struct device *dev);
+ extern int pm_runtime_clk_add(struct device *dev, const char *con_id);
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index e83ac25..7b856b3 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -224,9 +224,9 @@ config PM_OPP
+         implementations a ready to use framework to manage OPPs.
+         For more information, read <file:Documentation/power/opp.txt>
+-config PM_RUNTIME_CLK
++config PM_CLK
+       def_bool y
+-      depends on PM_RUNTIME && HAVE_CLK
++      depends on PM && HAVE_CLK
+ config PM_GENERIC_DOMAINS
+       bool
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0010-PM-Rename-clock-management-functions.patch b/patches.runtime_pm/0010-PM-Rename-clock-management-functions.patch
new file mode 100644 (file)
index 0000000..32ad36c
--- /dev/null
@@ -0,0 +1,588 @@
+From cfd761906963c2b03706d637466730ba34ba37d1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:13:44 +0200
+Subject: PM: Rename clock management functions
+
+The common PM clock management functions may be used for system
+suspend/resume as well as for runtime PM, so rename them
+accordingly.  Modify kerneldoc comments describing these functions
+and kernel messages printed by them, so that they refer to power
+management in general rather that to runtime PM.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 3d5c30367cbc0c55c93bb158e824e00badc7ddc4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ arch/arm/mach-omap1/pm_bus.c        |    6 +-
+ arch/arm/mach-shmobile/pm_runtime.c |    6 +-
+ drivers/base/power/clock_ops.c      |  188 +++++++++++++++++------------------
+ include/linux/pm_runtime.h          |   28 +++---
+ 4 files changed, 114 insertions(+), 114 deletions(-)
+
+diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
+index 212f331..943072d 100644
+--- a/arch/arm/mach-omap1/pm_bus.c
++++ b/arch/arm/mach-omap1/pm_bus.c
+@@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev)
+       if (ret)
+               return ret;
+-      ret = pm_runtime_clk_suspend(dev);
++      ret = pm_clk_suspend(dev);
+       if (ret) {
+               pm_generic_runtime_resume(dev);
+               return ret;
+@@ -45,7 +45,7 @@ static int omap1_pm_runtime_resume(struct device *dev)
+ {
+       dev_dbg(dev, "%s\n", __func__);
+-      pm_runtime_clk_resume(dev);
++      pm_clk_resume(dev);
+       return pm_generic_runtime_resume(dev);
+ }
+@@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void)
+       if (!cpu_class_is_omap1())
+               return -ENODEV;
+-      pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
++      pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+ }
+diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
+index 99802d2..2bcde1c 100644
+--- a/arch/arm/mach-shmobile/pm_runtime.c
++++ b/arch/arm/mach-shmobile/pm_runtime.c
+@@ -30,8 +30,8 @@ static int default_platform_runtime_idle(struct device *dev)
+ static struct dev_pm_domain default_pm_domain = {
+       .ops = {
+-              .runtime_suspend = pm_runtime_clk_suspend,
+-              .runtime_resume = pm_runtime_clk_resume,
++              .runtime_suspend = pm_clk_suspend,
++              .runtime_resume = pm_clk_resume,
+               .runtime_idle = default_platform_runtime_idle,
+               USE_PLATFORM_PM_SLEEP_OPS
+       },
+@@ -52,7 +52,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
+ static int __init sh_pm_runtime_init(void)
+ {
+-      pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
++      pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+ }
+ core_initcall(sh_pm_runtime_init);
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index 2fb9c12..a846b2f 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -17,7 +17,7 @@
+ #ifdef CONFIG_PM
+-struct pm_runtime_clk_data {
++struct pm_clk_data {
+       struct list_head clock_list;
+       struct mutex lock;
+ };
+@@ -36,25 +36,25 @@ struct pm_clock_entry {
+       enum pce_status status;
+ };
+-static struct pm_runtime_clk_data *__to_prd(struct device *dev)
++static struct pm_clk_data *__to_pcd(struct device *dev)
+ {
+       return dev ? dev->power.subsys_data : NULL;
+ }
+ /**
+- * pm_runtime_clk_add - Start using a device clock for runtime PM.
+- * @dev: Device whose clock is going to be used for runtime PM.
++ * pm_clk_add - Start using a device clock for power management.
++ * @dev: Device whose clock is going to be used for power management.
+  * @con_id: Connection ID of the clock.
+  *
+  * Add the clock represented by @con_id to the list of clocks used for
+- * the runtime PM of @dev.
++ * the power management of @dev.
+  */
+-int pm_runtime_clk_add(struct device *dev, const char *con_id)
++int pm_clk_add(struct device *dev, const char *con_id)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
+-      if (!prd)
++      if (!pcd)
+               return -EINVAL;
+       ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
+               }
+       }
+-      mutex_lock(&prd->lock);
+-      list_add_tail(&ce->node, &prd->clock_list);
+-      mutex_unlock(&prd->lock);
++      mutex_lock(&pcd->lock);
++      list_add_tail(&ce->node, &pcd->clock_list);
++      mutex_unlock(&pcd->lock);
+       return 0;
+ }
+ /**
+- * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
+- * @ce: Runtime PM clock entry to destroy.
++ * __pm_clk_remove - Destroy PM clock entry.
++ * @ce: PM clock entry to destroy.
+  *
+- * This routine must be called under the mutex protecting the runtime PM list
+- * of clocks corresponding the the @ce's device.
++ * This routine must be called under the mutex protecting the PM list of clocks
++ * corresponding the the @ce's device.
+  */
+-static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
++static void __pm_clk_remove(struct pm_clock_entry *ce)
+ {
+       if (!ce)
+               return;
+@@ -108,87 +108,87 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+ }
+ /**
+- * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
+- * @dev: Device whose clock should not be used for runtime PM any more.
++ * pm_clk_remove - Stop using a device clock for power management.
++ * @dev: Device whose clock should not be used for PM any more.
+  * @con_id: Connection ID of the clock.
+  *
+  * Remove the clock represented by @con_id from the list of clocks used for
+- * the runtime PM of @dev.
++ * the power management of @dev.
+  */
+-void pm_runtime_clk_remove(struct device *dev, const char *con_id)
++void pm_clk_remove(struct device *dev, const char *con_id)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
+-      if (!prd)
++      if (!pcd)
+               return;
+-      mutex_lock(&prd->lock);
++      mutex_lock(&pcd->lock);
+-      list_for_each_entry(ce, &prd->clock_list, node) {
++      list_for_each_entry(ce, &pcd->clock_list, node) {
+               if (!con_id && !ce->con_id) {
+-                      __pm_runtime_clk_remove(ce);
++                      __pm_clk_remove(ce);
+                       break;
+               } else if (!con_id || !ce->con_id) {
+                       continue;
+               } else if (!strcmp(con_id, ce->con_id)) {
+-                      __pm_runtime_clk_remove(ce);
++                      __pm_clk_remove(ce);
+                       break;
+               }
+       }
+-      mutex_unlock(&prd->lock);
++      mutex_unlock(&pcd->lock);
+ }
+ /**
+- * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+- * @dev: Device to initialize the list of runtime PM clocks for.
++ * pm_clk_init - Initialize a device's list of power management clocks.
++ * @dev: Device to initialize the list of PM clocks for.
+  *
+- * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
++ * Allocate a struct pm_clk_data object, initialize its lock member and
+  * make the @dev's power.subsys_data field point to it.
+  */
+-int pm_runtime_clk_init(struct device *dev)
++int pm_clk_init(struct device *dev)
+ {
+-      struct pm_runtime_clk_data *prd;
++      struct pm_clk_data *pcd;
+-      prd = kzalloc(sizeof(*prd), GFP_KERNEL);
+-      if (!prd) {
+-              dev_err(dev, "Not enough memory fo runtime PM data.\n");
++      pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
++      if (!pcd) {
++              dev_err(dev, "Not enough memory for PM clock data.\n");
+               return -ENOMEM;
+       }
+-      INIT_LIST_HEAD(&prd->clock_list);
+-      mutex_init(&prd->lock);
+-      dev->power.subsys_data = prd;
++      INIT_LIST_HEAD(&pcd->clock_list);
++      mutex_init(&pcd->lock);
++      dev->power.subsys_data = pcd;
+       return 0;
+ }
+ /**
+- * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
+- * @dev: Device to destroy the list of runtime PM clocks for.
++ * pm_clk_destroy - Destroy a device's list of power management clocks.
++ * @dev: Device to destroy the list of PM clocks for.
+  *
+  * Clear the @dev's power.subsys_data field, remove the list of clock entries
+- * from the struct pm_runtime_clk_data object pointed to by it before and free
++ * from the struct pm_clk_data object pointed to by it before and free
+  * that object.
+  */
+-void pm_runtime_clk_destroy(struct device *dev)
++void pm_clk_destroy(struct device *dev)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce, *c;
+-      if (!prd)
++      if (!pcd)
+               return;
+       dev->power.subsys_data = NULL;
+-      mutex_lock(&prd->lock);
++      mutex_lock(&pcd->lock);
+-      list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
+-              __pm_runtime_clk_remove(ce);
++      list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
++              __pm_clk_remove(ce);
+-      mutex_unlock(&prd->lock);
++      mutex_unlock(&pcd->lock);
+-      kfree(prd);
++      kfree(pcd);
+ }
+ #endif /* CONFIG_PM */
+@@ -196,11 +196,11 @@ void pm_runtime_clk_destroy(struct device *dev)
+ #ifdef CONFIG_PM_RUNTIME
+ /**
+- * pm_runtime_clk_acquire - Acquire a device clock.
++ * pm_clk_acquire - Acquire a device clock.
+  * @dev: Device whose clock is to be acquired.
+  * @con_id: Connection ID of the clock.
+  */
+-static void pm_runtime_clk_acquire(struct device *dev,
++static void pm_clk_acquire(struct device *dev,
+                                   struct pm_clock_entry *ce)
+ {
+       ce->clk = clk_get(dev, ce->con_id);
+@@ -213,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
+ }
+ /**
+- * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
++ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+  * @dev: Device to disable the clocks for.
+  */
+-int pm_runtime_clk_suspend(struct device *dev)
++int pm_clk_suspend(struct device *dev)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
+       dev_dbg(dev, "%s()\n", __func__);
+-      if (!prd)
++      if (!pcd)
+               return 0;
+-      mutex_lock(&prd->lock);
++      mutex_lock(&pcd->lock);
+-      list_for_each_entry_reverse(ce, &prd->clock_list, node) {
++      list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+               if (ce->status == PCE_STATUS_NONE)
+-                      pm_runtime_clk_acquire(dev, ce);
++                      pm_clk_acquire(dev, ce);
+               if (ce->status < PCE_STATUS_ERROR) {
+                       clk_disable(ce->clk);
+@@ -238,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
+               }
+       }
+-      mutex_unlock(&prd->lock);
++      mutex_unlock(&pcd->lock);
+       return 0;
+ }
+ /**
+- * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
++ * pm_clk_resume - Enable clocks in a device's PM clock list.
+  * @dev: Device to enable the clocks for.
+  */
+-int pm_runtime_clk_resume(struct device *dev)
++int pm_clk_resume(struct device *dev)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
+       dev_dbg(dev, "%s()\n", __func__);
+-      if (!prd)
++      if (!pcd)
+               return 0;
+-      mutex_lock(&prd->lock);
++      mutex_lock(&pcd->lock);
+-      list_for_each_entry(ce, &prd->clock_list, node) {
++      list_for_each_entry(ce, &pcd->clock_list, node) {
+               if (ce->status == PCE_STATUS_NONE)
+-                      pm_runtime_clk_acquire(dev, ce);
++                      pm_clk_acquire(dev, ce);
+               if (ce->status < PCE_STATUS_ERROR) {
+                       clk_enable(ce->clk);
+@@ -269,13 +269,13 @@ int pm_runtime_clk_resume(struct device *dev)
+               }
+       }
+-      mutex_unlock(&prd->lock);
++      mutex_unlock(&pcd->lock);
+       return 0;
+ }
+ /**
+- * pm_runtime_clk_notify - Notify routine for device addition and removal.
++ * pm_clk_notify - Notify routine for device addition and removal.
+  * @nb: Notifier block object this function is a member of.
+  * @action: Operation being carried out by the caller.
+  * @data: Device the routine is being run for.
+@@ -284,13 +284,13 @@ int pm_runtime_clk_resume(struct device *dev)
+  * struct pm_clk_notifier_block containing all of the requisite data.
+  * Specifically, the pm_domain member of that object is copied to the device's
+  * pm_domain field and its con_ids member is used to populate the device's list
+- * of runtime PM clocks, depending on @action.
++ * of PM clocks, depending on @action.
+  *
+  * If the device's pm_domain field is already populated with a value different
+  * from the one stored in the struct pm_clk_notifier_block object, the function
+  * does nothing.
+  */
+-static int pm_runtime_clk_notify(struct notifier_block *nb,
++static int pm_clk_notify(struct notifier_block *nb,
+                                unsigned long action, void *data)
+ {
+       struct pm_clk_notifier_block *clknb;
+@@ -307,16 +307,16 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+               if (dev->pm_domain)
+                       break;
+-              error = pm_runtime_clk_init(dev);
++              error = pm_clk_init(dev);
+               if (error)
+                       break;
+               dev->pm_domain = clknb->pm_domain;
+               if (clknb->con_ids[0]) {
+                       for (con_id = clknb->con_ids; *con_id; con_id++)
+-                              pm_runtime_clk_add(dev, *con_id);
++                              pm_clk_add(dev, *con_id);
+               } else {
+-                      pm_runtime_clk_add(dev, NULL);
++                      pm_clk_add(dev, NULL);
+               }
+               break;
+@@ -325,7 +325,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+                       break;
+               dev->pm_domain = NULL;
+-              pm_runtime_clk_destroy(dev);
++              pm_clk_destroy(dev);
+               break;
+       }
+@@ -337,51 +337,51 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+ #ifdef CONFIG_PM
+ /**
+- * pm_runtime_clk_suspend - Disable clocks in a device's PM clock list.
++ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+  * @dev: Device to disable the clocks for.
+  */
+-int pm_runtime_clk_suspend(struct device *dev)
++int pm_clk_suspend(struct device *dev)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
+       dev_dbg(dev, "%s()\n", __func__);
+       /* If there is no driver, the clocks are already disabled. */
+-      if (!prd || !dev->driver)
++      if (!pcd || !dev->driver)
+               return 0;
+-      mutex_lock(&prd->lock);
++      mutex_lock(&pcd->lock);
+-      list_for_each_entry_reverse(ce, &prd->clock_list, node)
++      list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+               clk_disable(ce->clk);
+-      mutex_unlock(&prd->lock);
++      mutex_unlock(&pcd->lock);
+       return 0;
+ }
+ /**
+- * pm_runtime_clk_resume - Enable clocks in a device's PM clock list.
++ * pm_clk_resume - Enable clocks in a device's PM clock list.
+  * @dev: Device to enable the clocks for.
+  */
+-int pm_runtime_clk_resume(struct device *dev)
++int pm_clk_resume(struct device *dev)
+ {
+-      struct pm_runtime_clk_data *prd = __to_prd(dev);
++      struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
+       dev_dbg(dev, "%s()\n", __func__);
+       /* If there is no driver, the clocks should remain disabled. */
+-      if (!prd || !dev->driver)
++      if (!pcd || !dev->driver)
+               return 0;
+-      mutex_lock(&prd->lock);
++      mutex_lock(&pcd->lock);
+-      list_for_each_entry(ce, &prd->clock_list, node)
++      list_for_each_entry(ce, &pcd->clock_list, node)
+               clk_enable(ce->clk);
+-      mutex_unlock(&prd->lock);
++      mutex_unlock(&pcd->lock);
+       return 0;
+ }
+@@ -423,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
+ }
+ /**
+- * pm_runtime_clk_notify - Notify routine for device addition and removal.
++ * pm_clk_notify - Notify routine for device addition and removal.
+  * @nb: Notifier block object this function is a member of.
+  * @action: Operation being carried out by the caller.
+  * @data: Device the routine is being run for.
+@@ -433,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id)
+  * Specifically, the con_ids member of that object is used to enable or disable
+  * the device's clocks, depending on @action.
+  */
+-static int pm_runtime_clk_notify(struct notifier_block *nb,
++static int pm_clk_notify(struct notifier_block *nb,
+                                unsigned long action, void *data)
+ {
+       struct pm_clk_notifier_block *clknb;
+@@ -469,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
+ #endif /* !CONFIG_PM_RUNTIME */
+ /**
+- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
++ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
+  * @bus: Bus type to add the notifier to.
+  * @clknb: Notifier to be added to the given bus type.
+  *
+  * The nb member of @clknb is not expected to be initialized and its
+- * notifier_call member will be replaced with pm_runtime_clk_notify().  However,
++ * notifier_call member will be replaced with pm_clk_notify().  However,
+  * the remaining members of @clknb should be populated prior to calling this
+  * routine.
+  */
+-void pm_runtime_clk_add_notifier(struct bus_type *bus,
++void pm_clk_add_notifier(struct bus_type *bus,
+                                struct pm_clk_notifier_block *clknb)
+ {
+       if (!bus || !clknb)
+               return;
+-      clknb->nb.notifier_call = pm_runtime_clk_notify;
++      clknb->nb.notifier_call = pm_clk_notify;
+       bus_register_notifier(bus, &clknb->nb);
+ }
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 1bd5063..dfb8539 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -252,36 +252,36 @@ struct pm_clk_notifier_block {
+ };
+ #ifdef CONFIG_PM_CLK
+-extern int pm_runtime_clk_init(struct device *dev);
+-extern void pm_runtime_clk_destroy(struct device *dev);
+-extern int pm_runtime_clk_add(struct device *dev, const char *con_id);
+-extern void pm_runtime_clk_remove(struct device *dev, const char *con_id);
+-extern int pm_runtime_clk_suspend(struct device *dev);
+-extern int pm_runtime_clk_resume(struct device *dev);
++extern int pm_clk_init(struct device *dev);
++extern void pm_clk_destroy(struct device *dev);
++extern int pm_clk_add(struct device *dev, const char *con_id);
++extern void pm_clk_remove(struct device *dev, const char *con_id);
++extern int pm_clk_suspend(struct device *dev);
++extern int pm_clk_resume(struct device *dev);
+ #else
+-static inline int pm_runtime_clk_init(struct device *dev)
++static inline int pm_clk_init(struct device *dev)
+ {
+       return -EINVAL;
+ }
+-static inline void pm_runtime_clk_destroy(struct device *dev)
++static inline void pm_clk_destroy(struct device *dev)
+ {
+ }
+-static inline int pm_runtime_clk_add(struct device *dev, const char *con_id)
++static inline int pm_clk_add(struct device *dev, const char *con_id)
+ {
+       return -EINVAL;
+ }
+-static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id)
++static inline void pm_clk_remove(struct device *dev, const char *con_id)
+ {
+ }
+-#define pm_runtime_clock_suspend      NULL
+-#define pm_runtime_clock_resume               NULL
++#define pm_clk_suspend        NULL
++#define pm_clk_resume NULL
+ #endif
+ #ifdef CONFIG_HAVE_CLK
+-extern void pm_runtime_clk_add_notifier(struct bus_type *bus,
++extern void pm_clk_add_notifier(struct bus_type *bus,
+                                       struct pm_clk_notifier_block *clknb);
+ #else
+-static inline void pm_runtime_clk_add_notifier(struct bus_type *bus,
++static inline void pm_clk_add_notifier(struct bus_type *bus,
+                                       struct pm_clk_notifier_block *clknb)
+ {
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0011-PM-Runtime-Update-documentation-of-interactions-with.patch b/patches.runtime_pm/0011-PM-Runtime-Update-documentation-of-interactions-with.patch
new file mode 100644 (file)
index 0000000..3d59c07
--- /dev/null
@@ -0,0 +1,90 @@
+From 313112d5f92eba88e4ce0e40e8b1614c6a7210d5 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:29:05 +0200
+Subject: PM / Runtime: Update documentation of interactions with system sleep
+
+The documents describing the interactions between runtime PM and
+system sleep generally refer to the model in which the system sleep
+state is entered through a global firmware or hardware operation.
+As a result, some recommendations given in there are not entirely
+suitable for systems in which this is not the case.  Update the
+documentation to take the existence of those systems into account.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 455716e9b12ba93e93181ac88bef62e4eb5ac66c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt    |    6 +++---
+ Documentation/power/runtime_pm.txt |   27 +++++++++++++++++++--------
+ 2 files changed, 22 insertions(+), 11 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 85c6f98..3384d59 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -604,7 +604,7 @@ state temporarily, for example so that its system wakeup capability can be
+ disabled.  This all depends on the hardware and the design of the subsystem and
+ device driver in question.
+-During system-wide resume from a sleep state it's best to put devices into the
+-full-power state, as explained in Documentation/power/runtime_pm.txt.  Refer to
+-that document for more information regarding this particular issue as well as
++During system-wide resume from a sleep state it's easiest to put devices into
++the full-power state, as explained in Documentation/power/runtime_pm.txt.  Refer
++to that document for more information regarding this particular issue as well as
+ for information on the device runtime power management framework in general.
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 4b011b1..513c52e 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -553,9 +553,9 @@ suspend routine).  It may be necessary to resume the device and suspend it again
+ in order to do so.  The same is true if the driver uses different power levels
+ or other settings for run-time suspend and system sleep.
+-During system resume, devices generally should be brought back to full power,
+-even if they were suspended before the system sleep began.  There are several
+-reasons for this, including:
++During system resume, the simplest approach is to bring all devices back to full
++power, even if they had been suspended before the system suspend began.  There
++are several reasons for this, including:
+   * The device might need to switch power levels, wake-up settings, etc.
+@@ -572,16 +572,27 @@ reasons for this, including:
+   * Even though the device was suspended, if its usage counter was > 0 then most
+     likely it would need a run-time resume in the near future anyway.
+-  * Always going back to full power is simplest.
+-
+-If the device was suspended before the sleep began, then its run-time PM status
+-will have to be updated to reflect the actual post-system sleep status.  The way
+-to do this is:
++If the device had been suspended before the system suspend began and it's
++brought back to full power during resume, then its run-time PM status will have
++to be updated to reflect the actual post-system sleep status.  The way to do
++this is:
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
++On some systems, however, system sleep is not entered through a global firmware
++or hardware operation.  Instead, all hardware components are put into low-power
++states directly by the kernel in a coordinated way.  Then, the system sleep
++state effectively follows from the states the hardware components end up in
++and the system is woken up from that state by a hardware interrupt or a similar
++mechanism entirely under the kernel's control.  As a result, the kernel never
++gives control away and the states of all devices during resume are precisely
++known to it.  If that is the case and none of the situations listed above takes
++place (in particular, if the system is not waking up from hibernation), it may
++be more efficient to leave the devices that had been suspended before the system
++suspend began in the suspended state.
++
+ 7. Generic subsystem callbacks
+ Subsystems may wish to conserve code space by using the set of generic power
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0012-PM-Runtime-Return-special-error-code-if-runtime-PM-i.patch b/patches.runtime_pm/0012-PM-Runtime-Return-special-error-code-if-runtime-PM-i.patch
new file mode 100644 (file)
index 0000000..194d416
--- /dev/null
@@ -0,0 +1,115 @@
+From 143c483c6f44d21be959051e4d494de9709a96cb Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 1 Jul 2011 22:29:15 +0200
+Subject: PM / Runtime: Return special error code if runtime PM is disabled
+
+Some callers of pm_runtime_get_sync() and other runtime PM helper
+functions, scsi_autopm_get_host() and scsi_autopm_get_device() in
+particular, need to distinguish error codes returned when runtime PM
+is disabled (i.e. power.disable_depth is nonzero for the given
+device) from error codes returned in other situations.  For this
+reason, make the runtime PM helper functions return -EACCES when
+power.disable_depth is nonzero and ensure that this error code
+won't be returned by them in any other circumstances.  Modify
+scsi_autopm_get_host() and scsi_autopm_get_device() to check the
+error code returned by pm_runtime_get_sync() and ignore -EACCES.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 632e270e01d8a1ee9e8ea56c83028727f17b1d17)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |    6 ++++--
+ drivers/base/power/runtime.c       |    9 +++++----
+ drivers/scsi/scsi_pm.c             |    8 ++++----
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 513c52e..0ec3d61 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -291,7 +291,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+     - execute the subsystem-level suspend callback for the device; returns 0 on
+       success, 1 if the device's run-time PM status was already 'suspended', or
+       error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt
+-      to suspend the device again in future
++      to suspend the device again in future and -EACCES means that
++      'power.disable_depth' is different from 0
+   int pm_runtime_autosuspend(struct device *dev);
+     - same as pm_runtime_suspend() except that the autosuspend delay is taken
+@@ -304,7 +305,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+       success, 1 if the device's run-time PM status was already 'active' or
+       error code on failure, where -EAGAIN means it may be safe to attempt to
+       resume the device again in future, but 'power.runtime_error' should be
+-      checked additionally
++      checked additionally, and -EACCES means that 'power.disable_depth' is
++      different from 0
+   int pm_request_idle(struct device *dev);
+     - submit a request to execute the subsystem-level idle callback for the
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 5f5c423..ee99025 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
+       if (dev->power.runtime_error)
+               retval = -EINVAL;
+-      else if (atomic_read(&dev->power.usage_count) > 0
+-          || dev->power.disable_depth > 0)
++      else if (dev->power.disable_depth > 0)
++              retval = -EACCES;
++      else if (atomic_read(&dev->power.usage_count) > 0)
+               retval = -EAGAIN;
+       else if (!pm_children_suspended(dev))
+               retval = -EBUSY;
+@@ -262,7 +263,7 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+               spin_lock_irq(&dev->power.lock);
+       }
+       dev->power.runtime_error = retval;
+-      return retval;
++      return retval != -EACCES ? retval : -EIO;
+ }
+ /**
+@@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+       if (dev->power.runtime_error)
+               retval = -EINVAL;
+       else if (dev->power.disable_depth > 0)
+-              retval = -EAGAIN;
++              retval = -EACCES;
+       if (retval)
+               goto out;
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index 122a5a2..995cdd5 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -159,9 +159,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev)
+       int     err;
+       err = pm_runtime_get_sync(&sdev->sdev_gendev);
+-      if (err < 0)
++      if (err < 0 && err !=-EACCES)
+               pm_runtime_put_sync(&sdev->sdev_gendev);
+-      else if (err > 0)
++      else
+               err = 0;
+       return err;
+ }
+@@ -188,9 +188,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost)
+       int     err;
+       err = pm_runtime_get_sync(&shost->shost_gendev);
+-      if (err < 0)
++      if (err < 0 && err !=-EACCES)
+               pm_runtime_put_sync(&shost->shost_gendev);
+-      else if (err > 0)
++      else
+               err = 0;
+       return err;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0013-PM-Limit-race-conditions-between-runtime-PM-and-syst.patch b/patches.runtime_pm/0013-PM-Limit-race-conditions-between-runtime-PM-and-syst.patch
new file mode 100644 (file)
index 0000000..1ac0565
--- /dev/null
@@ -0,0 +1,224 @@
+From e548d5f9651fa90d90090a843c9495c4fcb38600 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 6 Jul 2011 10:51:58 +0200
+Subject: PM: Limit race conditions between runtime PM and system sleep (v2)
+
+One of the roles of the PM core is to prevent different PM callbacks
+executed for the same device object from racing with each other.
+Unfortunately, after commit e8665002477f0278f84f898145b1f141ba26ee26
+(PM: Allow pm_runtime_suspend() to succeed during system suspend)
+runtime PM callbacks may be executed concurrently with system
+suspend/resume callbacks for the same device.
+
+The main reason for commit e8665002477f0278f84f898145b1f141ba26ee26
+was that some subsystems and device drivers wanted to use runtime PM
+helpers, pm_runtime_suspend() and pm_runtime_put_sync() in
+particular, for carrying out the suspend of devices in their
+.suspend() callbacks.  However, as it's been determined recently,
+there are multiple reasons not to do so, inlcuding:
+
+ * The caller really doesn't control the runtime PM usage counters,
+   because user space can access them through sysfs and effectively
+   block runtime PM.  That means using pm_runtime_suspend() or
+   pm_runtime_get_sync() to suspend devices during system suspend
+   may or may not work.
+
+ * If a driver calls pm_runtime_suspend() from its .suspend()
+   callback, it causes the subsystem's .runtime_suspend() callback to
+   be executed, which leads to the call sequence:
+
+   subsys->suspend(dev)
+      driver->suspend(dev)
+         pm_runtime_suspend(dev)
+            subsys->runtime_suspend(dev)
+
+   recursive from the subsystem's point of view.  For some subsystems
+   that may actually work (e.g. the platform bus type), but for some
+   it will fail in a rather spectacular fashion (e.g. PCI).  In each
+   case it means a layering violation.
+
+ * Both the subsystem and the driver can provide .suspend_noirq()
+   callbacks for system suspend that can do whatever the
+   .runtime_suspend() callbacks do just fine, so it really isn't
+   necessary to call pm_runtime_suspend() during system suspend.
+
+ * The runtime PM's handling of wakeup devices is usually different
+   from the system suspend's one, so .runtime_suspend() may simply be
+   inappropriate for system suspend.
+
+ * System suspend is supposed to work even if CONFIG_PM_RUNTIME is
+   unset.
+
+ * The runtime PM workqueue is frozen before system suspend, so if
+   whatever the driver is going to do during system suspend depends
+   on it, that simply won't work.
+
+Still, there is a good reason to allow pm_runtime_resume() to
+succeed during system suspend and resume (for instance, some
+subsystems and device drivers may legitimately use it to ensure that
+their devices are in full-power states before suspending them).
+Moreover, there is no reason to prevent runtime PM callbacks from
+being executed in parallel with the system suspend/resume .prepare()
+and .complete() callbacks and the code removed by commit
+e8665002477f0278f84f898145b1f141ba26ee26 went too far in this
+respect.  On the other hand, runtime PM callbacks, including
+.runtime_resume(), must not be executed during system suspend's
+"late" stage of suspending devices and during system resume's "early"
+device resume stage.
+
+Taking all of the above into consideration, make the PM core
+acquire a runtime PM reference to every device and resume it if
+there's a runtime PM resume request pending right before executing
+the subsystem-level .suspend() callback for it.  Make the PM core
+drop references to all devices right after executing the
+subsystem-level .resume() callbacks for them.  Additionally,
+make the PM core disable the runtime PM framework for all devices
+during system suspend, after executing the subsystem-level .suspend()
+callbacks for them, and enable the runtime PM framework for all
+devices during system resume, right before executing the
+subsystem-level .resume() callbacks for them.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 1e2ef05bb8cf851a694d38e9170c89e7ff052741)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   21 +++++++++++++++++++++
+ drivers/base/power/main.c          |   35 +++++++++++++++++++++++------------
+ 2 files changed, 44 insertions(+), 12 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 0ec3d61..d50dd1a 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -583,6 +583,13 @@ this is:
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
++The PM core always increments the run-time usage counter before calling the
++->suspend() callback and decrements it after calling the ->resume() callback.
++Hence disabling run-time PM temporarily like this will not cause any runtime
++suspend attempts to be permanently lost.  If the usage count goes to zero
++following the return of the ->resume() callback, the ->runtime_idle() callback
++will be invoked as usual.
++
+ On some systems, however, system sleep is not entered through a global firmware
+ or hardware operation.  Instead, all hardware components are put into low-power
+ states directly by the kernel in a coordinated way.  Then, the system sleep
+@@ -595,6 +602,20 @@ place (in particular, if the system is not waking up from hibernation), it may
+ be more efficient to leave the devices that had been suspended before the system
+ suspend began in the suspended state.
++The PM core does its best to reduce the probability of race conditions between
++the runtime PM and system suspend/resume (and hibernation) callbacks by carrying
++out the following operations:
++
++  * During system suspend it calls pm_runtime_get_noresume() and
++    pm_runtime_barrier() for every device right before executing the
++    subsystem-level .suspend() callback for it.  In addition to that it calls
++    pm_runtime_disable() for every device right after executing the
++    subsystem-level .suspend() callback for it.
++
++  * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
++    for every device right before and right after executing the subsystem-level
++    .resume() callback for it, respectively.
++
+ 7. Generic subsystem callbacks
+ Subsystems may wish to conserve code space by using the set of generic power
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 85b591a..a854591 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -505,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+ static int device_resume(struct device *dev, pm_message_t state, bool async)
+ {
+       int error = 0;
++      bool put = false;
+       TRACE_DEVICE(dev);
+       TRACE_RESUME(0);
+@@ -521,6 +522,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+       if (!dev->power.is_suspended)
+               goto Unlock;
++      pm_runtime_enable(dev);
++      put = true;
++
+       if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "power domain ");
+               error = pm_op(dev, &dev->pm_domain->ops, state);
+@@ -563,6 +567,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+       complete_all(&dev->power.completion);
+       TRACE_RESUME(error);
++
++      if (put)
++              pm_runtime_put_sync(dev);
++
+       return error;
+ }
+@@ -843,16 +851,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+       int error = 0;
+       dpm_wait_for_children(dev, async);
+-      device_lock(dev);
+       if (async_error)
+-              goto Unlock;
++              return 0;
++
++      pm_runtime_get_noresume(dev);
++      if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
++              pm_wakeup_event(dev, 0);
+       if (pm_wakeup_pending()) {
++              pm_runtime_put_sync(dev);
+               async_error = -EBUSY;
+-              goto Unlock;
++              return 0;
+       }
++      device_lock(dev);
++
+       if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "power domain ");
+               error = pm_op(dev, &dev->pm_domain->ops, state);
+@@ -890,12 +904,15 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+  End:
+       dev->power.is_suspended = !error;
+- Unlock:
+       device_unlock(dev);
+       complete_all(&dev->power.completion);
+-      if (error)
++      if (error) {
++              pm_runtime_put_sync(dev);
+               async_error = error;
++      } else if (dev->power.is_suspended) {
++              __pm_runtime_disable(dev, false);
++      }
+       return error;
+ }
+@@ -1035,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
+               get_device(dev);
+               mutex_unlock(&dpm_list_mtx);
+-              pm_runtime_get_noresume(dev);
+-              if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+-                      pm_wakeup_event(dev, 0);
+-
+-              pm_runtime_put_sync(dev);
+-              error = pm_wakeup_pending() ?
+-                              -EBUSY : device_prepare(dev, state);
++              error = device_prepare(dev, state);
+               mutex_lock(&dpm_list_mtx);
+               if (error) {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0014-PM-Runtime-Improve-documentation-of-enable-disable-a.patch b/patches.runtime_pm/0014-PM-Runtime-Improve-documentation-of-enable-disable-a.patch
new file mode 100644 (file)
index 0000000..6eb6a53
--- /dev/null
@@ -0,0 +1,60 @@
+From 24acf9a9be8bb6115cd58f5453c1bf18b69243f0 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 6 Jul 2011 10:52:06 +0200
+Subject: PM / Runtime: Improve documentation of enable, disable and barrier
+
+The runtime PM documentation in Documentation/power/runtime_pm.txt
+doesn't say that pm_runtime_enable() and pm_runtime_disable() work by
+operating on power.disable_depth, which is wrong, because the
+possibility of nesting disables doesn't follow from the description
+of these functions.  Also, there is no description of
+pm_runtime_barrier() at all in the document, which is confusing.
+Improve the documentation by fixing those issues.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e358bad75ff13210f5211cac9f93d76170d43f89)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index d50dd1a..ca15bbb 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -369,17 +369,27 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+       pm_runtime_autosuspend(dev) and return its result
+   void pm_runtime_enable(struct device *dev);
+-    - enable the run-time PM helper functions to run the device bus type's
+-      run-time PM callbacks described in Section 2
++    - decrement the device's 'power.disable_depth' field; if that field is equal
++      to zero, the run-time PM helper functions can execute subsystem-level
++      callbacks described in Section 2 for the device
+   int pm_runtime_disable(struct device *dev);
+-    - prevent the run-time PM helper functions from running subsystem-level
+-      run-time PM callbacks for the device, make sure that all of the pending
++    - increment the device's 'power.disable_depth' field (if the value of that
++      field was previously zero, this prevents subsystem-level runtime PM
++      callbacks from being run for the device), make sure that all of the pending
+       run-time PM operations on the device are either completed or canceled;
+       returns 1 if there was a resume request pending and it was necessary to
+       execute the subsystem-level resume callback for the device to satisfy that
+       request, otherwise 0 is returned
++  int pm_runtime_barrier(struct device *dev);
++    - check if there's a resume request pending for the device and resume it
++      (synchronously) in that case, cancel any other pending runtime PM requests
++      regarding it and wait for all runtime PM operations on it in progress to
++      complete; returns 1 if there was a resume request pending and it was
++      necessary to execute the subsystem-level resume callback for the device to
++      satisfy that request, otherwise 0 is returned
++
+   void pm_suspend_ignore_children(struct device *dev, bool enable);
+     - set/unset the power.ignore_children flag of the device
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0015-PM-Runtime-Replace-run-time-with-runtime-in-document.patch b/patches.runtime_pm/0015-PM-Runtime-Replace-run-time-with-runtime-in-document.patch
new file mode 100644 (file)
index 0000000..9586970
--- /dev/null
@@ -0,0 +1,685 @@
+From 653b76192ea82c4960de68f8ef2991a6b648fe03 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 6 Jul 2011 10:52:13 +0200
+Subject: PM / Runtime: Replace "run-time" with "runtime" in documentation
+
+The runtime PM documentation and kerneldoc comments sometimes spell
+"runtime" with a dash (i.e. "run-time").  Replace all of those
+instances with "runtime" to make the naming consistent.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 62052ab1d1a456f5f62f8b753e12d10ca1a83604)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |  130 ++++++++++++++++++------------------
+ drivers/base/power/runtime.c       |   66 +++++++++---------
+ 2 files changed, 98 insertions(+), 98 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index ca15bbb..40e47c7 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -1,39 +1,39 @@
+-Run-time Power Management Framework for I/O Devices
++Runtime Power Management Framework for I/O Devices
+ (C) 2009-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ (C) 2010 Alan Stern <stern@rowland.harvard.edu>
+ 1. Introduction
+-Support for run-time power management (run-time PM) of I/O devices is provided
++Support for runtime power management (runtime PM) of I/O devices is provided
+ at the power management core (PM core) level by means of:
+ * The power management workqueue pm_wq in which bus types and device drivers can
+   put their PM-related work items.  It is strongly recommended that pm_wq be
+-  used for queuing all work items related to run-time PM, because this allows
++  used for queuing all work items related to runtime PM, because this allows
+   them to be synchronized with system-wide power transitions (suspend to RAM,
+   hibernation and resume from system sleep states).  pm_wq is declared in
+   include/linux/pm_runtime.h and defined in kernel/power/main.c.
+-* A number of run-time PM fields in the 'power' member of 'struct device' (which
++* A number of runtime PM fields in the 'power' member of 'struct device' (which
+   is of the type 'struct dev_pm_info', defined in include/linux/pm.h) that can
+-  be used for synchronizing run-time PM operations with one another.
++  be used for synchronizing runtime PM operations with one another.
+-* Three device run-time PM callbacks in 'struct dev_pm_ops' (defined in
++* Three device runtime PM callbacks in 'struct dev_pm_ops' (defined in
+   include/linux/pm.h).
+ * A set of helper functions defined in drivers/base/power/runtime.c that can be
+-  used for carrying out run-time PM operations in such a way that the
++  used for carrying out runtime PM operations in such a way that the
+   synchronization between them is taken care of by the PM core.  Bus types and
+   device drivers are encouraged to use these functions.
+-The run-time PM callbacks present in 'struct dev_pm_ops', the device run-time PM
++The runtime PM callbacks present in 'struct dev_pm_ops', the device runtime PM
+ fields of 'struct dev_pm_info' and the core helper functions provided for
+-run-time PM are described below.
++runtime PM are described below.
+-2. Device Run-time PM Callbacks
++2. Device Runtime PM Callbacks
+-There are three device run-time PM callbacks defined in 'struct dev_pm_ops':
++There are three device runtime PM callbacks defined in 'struct dev_pm_ops':
+ struct dev_pm_ops {
+       ...
+@@ -72,11 +72,11 @@ knows what to do to handle the device).
+     not mean that the device has been put into a low power state.  It is
+     supposed to mean, however, that the device will not process data and will
+     not communicate with the CPU(s) and RAM until the subsystem-level resume
+-    callback is executed for it.  The run-time PM status of a device after
++    callback is executed for it.  The runtime PM status of a device after
+     successful execution of the subsystem-level suspend callback is 'suspended'.
+   * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
+-    the device's run-time PM status is 'active', which means that the device
++    the device's runtime PM status is 'active', which means that the device
+     _must_ be fully operational afterwards.
+   * If the subsystem-level suspend callback returns an error code different
+@@ -104,7 +104,7 @@ the device).
+   * Once the subsystem-level resume callback has completed successfully, the PM
+     core regards the device as fully operational, which means that the device
+-    _must_ be able to complete I/O operations as needed.  The run-time PM status
++    _must_ be able to complete I/O operations as needed.  The runtime PM status
+     of the device is then 'active'.
+   * If the subsystem-level resume callback returns an error code, the PM core
+@@ -130,7 +130,7 @@ device in that case.  The value returned by this callback is ignored by the PM
+ core.
+ The helper functions provided by the PM core, described in Section 4, guarantee
+-that the following constraints are met with respect to the bus type's run-time
++that the following constraints are met with respect to the bus type's runtime
+ PM callbacks:
+ (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
+@@ -142,7 +142,7 @@ PM callbacks:
+ (2) ->runtime_idle() and ->runtime_suspend() can only be executed for 'active'
+     devices (i.e. the PM core will only execute ->runtime_idle() or
+-    ->runtime_suspend() for the devices the run-time PM status of which is
++    ->runtime_suspend() for the devices the runtime PM status of which is
+     'active').
+ (3) ->runtime_idle() and ->runtime_suspend() can only be executed for a device
+@@ -151,7 +151,7 @@ PM callbacks:
+     flag of which is set.
+ (4) ->runtime_resume() can only be executed for 'suspended' devices  (i.e. the
+-    PM core will only execute ->runtime_resume() for the devices the run-time
++    PM core will only execute ->runtime_resume() for the devices the runtime
+     PM status of which is 'suspended').
+ Additionally, the helper functions provided by the PM core obey the following
+@@ -171,9 +171,9 @@ rules:
+     scheduled requests to execute the other callbacks for the same device,
+     except for scheduled autosuspends.
+-3. Run-time PM Device Fields
++3. Runtime PM Device Fields
+-The following device run-time PM fields are present in 'struct dev_pm_info', as
++The following device runtime PM fields are present in 'struct dev_pm_info', as
+ defined in include/linux/pm.h:
+   struct timer_list suspend_timer;
+@@ -205,7 +205,7 @@ defined in include/linux/pm.h:
+   unsigned int disable_depth;
+     - used for disabling the helper funcions (they work normally if this is
+-      equal to zero); the initial value of it is 1 (i.e. run-time PM is
++      equal to zero); the initial value of it is 1 (i.e. runtime PM is
+       initially disabled for all devices)
+   unsigned int runtime_error;
+@@ -229,10 +229,10 @@ defined in include/linux/pm.h:
+       suspend to complete; means "start a resume as soon as you've suspended"
+   unsigned int run_wake;
+-    - set if the device is capable of generating run-time wake-up events
++    - set if the device is capable of generating runtime wake-up events
+   enum rpm_status runtime_status;
+-    - the run-time PM status of the device; this field's initial value is
++    - the runtime PM status of the device; this field's initial value is
+       RPM_SUSPENDED, which means that each device is initially regarded by the
+       PM core as 'suspended', regardless of its real hardware status
+@@ -243,7 +243,7 @@ defined in include/linux/pm.h:
+       and pm_runtime_forbid() helper functions
+   unsigned int no_callbacks;
+-    - indicates that the device does not use the run-time PM callbacks (see
++    - indicates that the device does not use the runtime PM callbacks (see
+       Section 8); it may be modified only by the pm_runtime_no_callbacks()
+       helper function
+@@ -270,16 +270,16 @@ defined in include/linux/pm.h:
+ All of the above fields are members of the 'power' member of 'struct device'.
+-4. Run-time PM Device Helper Functions
++4. Runtime PM Device Helper Functions
+-The following run-time PM helper functions are defined in
++The following runtime PM helper functions are defined in
+ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   void pm_runtime_init(struct device *dev);
+-    - initialize the device run-time PM fields in 'struct dev_pm_info'
++    - initialize the device runtime PM fields in 'struct dev_pm_info'
+   void pm_runtime_remove(struct device *dev);
+-    - make sure that the run-time PM of the device will be disabled after
++    - make sure that the runtime PM of the device will be disabled after
+       removing the device from device hierarchy
+   int pm_runtime_idle(struct device *dev);
+@@ -289,7 +289,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   int pm_runtime_suspend(struct device *dev);
+     - execute the subsystem-level suspend callback for the device; returns 0 on
+-      success, 1 if the device's run-time PM status was already 'suspended', or
++      success, 1 if the device's runtime PM status was already 'suspended', or
+       error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt
+       to suspend the device again in future and -EACCES means that
+       'power.disable_depth' is different from 0
+@@ -302,7 +302,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   int pm_runtime_resume(struct device *dev);
+     - execute the subsystem-level resume callback for the device; returns 0 on
+-      success, 1 if the device's run-time PM status was already 'active' or
++      success, 1 if the device's runtime PM status was already 'active' or
+       error code on failure, where -EAGAIN means it may be safe to attempt to
+       resume the device again in future, but 'power.runtime_error' should be
+       checked additionally, and -EACCES means that 'power.disable_depth' is
+@@ -323,7 +323,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+       device in future, where 'delay' is the time to wait before queuing up a
+       suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work
+       item is queued up immediately); returns 0 on success, 1 if the device's PM
+-      run-time status was already 'suspended', or error code if the request
++      runtime status was already 'suspended', or error code if the request
+       hasn't been scheduled (or queued up if 'delay' is 0); if the execution of
+       ->runtime_suspend() is already scheduled and not yet expired, the new
+       value of 'delay' will be used as the time to wait
+@@ -331,7 +331,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   int pm_request_resume(struct device *dev);
+     - submit a request to execute the subsystem-level resume callback for the
+       device (the request is represented by a work item in pm_wq); returns 0 on
+-      success, 1 if the device's run-time PM status was already 'active', or
++      success, 1 if the device's runtime PM status was already 'active', or
+       error code if the request hasn't been queued up
+   void pm_runtime_get_noresume(struct device *dev);
+@@ -370,14 +370,14 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   void pm_runtime_enable(struct device *dev);
+     - decrement the device's 'power.disable_depth' field; if that field is equal
+-      to zero, the run-time PM helper functions can execute subsystem-level
++      to zero, the runtime PM helper functions can execute subsystem-level
+       callbacks described in Section 2 for the device
+   int pm_runtime_disable(struct device *dev);
+     - increment the device's 'power.disable_depth' field (if the value of that
+       field was previously zero, this prevents subsystem-level runtime PM
+       callbacks from being run for the device), make sure that all of the pending
+-      run-time PM operations on the device are either completed or canceled;
++      runtime PM operations on the device are either completed or canceled;
+       returns 1 if there was a resume request pending and it was necessary to
+       execute the subsystem-level resume callback for the device to satisfy that
+       request, otherwise 0 is returned
+@@ -394,7 +394,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+     - set/unset the power.ignore_children flag of the device
+   int pm_runtime_set_active(struct device *dev);
+-    - clear the device's 'power.runtime_error' flag, set the device's run-time
++    - clear the device's 'power.runtime_error' flag, set the device's runtime
+       PM status to 'active' and update its parent's counter of 'active'
+       children as appropriate (it is only valid to use this function if
+       'power.runtime_error' is set or 'power.disable_depth' is greater than
+@@ -402,7 +402,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+       which is not active and the 'power.ignore_children' flag of which is unset
+   void pm_runtime_set_suspended(struct device *dev);
+-    - clear the device's 'power.runtime_error' flag, set the device's run-time
++    - clear the device's 'power.runtime_error' flag, set the device's runtime
+       PM status to 'suspended' and update its parent's counter of 'active'
+       children as appropriate (it is only valid to use this function if
+       'power.runtime_error' is set or 'power.disable_depth' is greater than
+@@ -423,7 +423,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+       effectively prevent the device from being power managed at run time)
+   void pm_runtime_no_callbacks(struct device *dev);
+-    - set the power.no_callbacks flag for the device and remove the run-time
++    - set the power.no_callbacks flag for the device and remove the runtime
+       PM attributes from /sys/devices/.../power (or prevent them from being
+       added when the device is registered)
+@@ -443,7 +443,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
+     - set the power.autosuspend_delay value to 'delay' (expressed in
+-      milliseconds); if 'delay' is negative then run-time suspends are
++      milliseconds); if 'delay' is negative then runtime suspends are
+       prevented
+   unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+@@ -482,35 +482,35 @@ pm_runtime_resume()
+ pm_runtime_get_sync()
+ pm_runtime_put_sync_suspend()
+-5. Run-time PM Initialization, Device Probing and Removal
++5. Runtime PM Initialization, Device Probing and Removal
+-Initially, the run-time PM is disabled for all devices, which means that the
+-majority of the run-time PM helper funtions described in Section 4 will return
++Initially, the runtime PM is disabled for all devices, which means that the
++majority of the runtime PM helper funtions described in Section 4 will return
+ -EAGAIN until pm_runtime_enable() is called for the device.
+-In addition to that, the initial run-time PM status of all devices is
++In addition to that, the initial runtime PM status of all devices is
+ 'suspended', but it need not reflect the actual physical state of the device.
+ Thus, if the device is initially active (i.e. it is able to process I/O), its
+-run-time PM status must be changed to 'active', with the help of
++runtime PM status must be changed to 'active', with the help of
+ pm_runtime_set_active(), before pm_runtime_enable() is called for the device.
+-However, if the device has a parent and the parent's run-time PM is enabled,
++However, if the device has a parent and the parent's runtime PM is enabled,
+ calling pm_runtime_set_active() for the device will affect the parent, unless
+ the parent's 'power.ignore_children' flag is set.  Namely, in that case the
+ parent won't be able to suspend at run time, using the PM core's helper
+ functions, as long as the child's status is 'active', even if the child's
+-run-time PM is still disabled (i.e. pm_runtime_enable() hasn't been called for
++runtime PM is still disabled (i.e. pm_runtime_enable() hasn't been called for
+ the child yet or pm_runtime_disable() has been called for it).  For this reason,
+ once pm_runtime_set_active() has been called for the device, pm_runtime_enable()
+-should be called for it too as soon as reasonably possible or its run-time PM
++should be called for it too as soon as reasonably possible or its runtime PM
+ status should be changed back to 'suspended' with the help of
+ pm_runtime_set_suspended().
+-If the default initial run-time PM status of the device (i.e. 'suspended')
++If the default initial runtime PM status of the device (i.e. 'suspended')
+ reflects the actual state of the device, its bus type's or its driver's
+ ->probe() callback will likely need to wake it up using one of the PM core's
+ helper functions described in Section 4.  In that case, pm_runtime_resume()
+-should be used.  Of course, for this purpose the device's run-time PM has to be
++should be used.  Of course, for this purpose the device's runtime PM has to be
+ enabled earlier by calling pm_runtime_enable().
+ If the device bus type's or driver's ->probe() callback runs
+@@ -541,29 +541,29 @@ The user space can effectively disallow the driver of the device to power manage
+ it at run time by changing the value of its /sys/devices/.../power/control
+ attribute to "on", which causes pm_runtime_forbid() to be called.  In principle,
+ this mechanism may also be used by the driver to effectively turn off the
+-run-time power management of the device until the user space turns it on.
+-Namely, during the initialization the driver can make sure that the run-time PM
++runtime power management of the device until the user space turns it on.
++Namely, during the initialization the driver can make sure that the runtime PM
+ status of the device is 'active' and call pm_runtime_forbid().  It should be
+ noted, however, that if the user space has already intentionally changed the
+ value of /sys/devices/.../power/control to "auto" to allow the driver to power
+ manage the device at run time, the driver may confuse it by using
+ pm_runtime_forbid() this way.
+-6. Run-time PM and System Sleep
++6. Runtime PM and System Sleep
+-Run-time PM and system sleep (i.e., system suspend and hibernation, also known
++Runtime PM and system sleep (i.e., system suspend and hibernation, also known
+ as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of
+ ways.  If a device is active when a system sleep starts, everything is
+ straightforward.  But what should happen if the device is already suspended?
+-The device may have different wake-up settings for run-time PM and system sleep.
+-For example, remote wake-up may be enabled for run-time suspend but disallowed
++The device may have different wake-up settings for runtime PM and system sleep.
++For example, remote wake-up may be enabled for runtime suspend but disallowed
+ for system sleep (device_may_wakeup(dev) returns 'false').  When this happens,
+ the subsystem-level system suspend callback is responsible for changing the
+ device's wake-up setting (it may leave that to the device driver's system
+ suspend routine).  It may be necessary to resume the device and suspend it again
+ in order to do so.  The same is true if the driver uses different power levels
+-or other settings for run-time suspend and system sleep.
++or other settings for runtime suspend and system sleep.
+ During system resume, the simplest approach is to bring all devices back to full
+ power, even if they had been suspended before the system suspend began.  There
+@@ -582,10 +582,10 @@ are several reasons for this, including:
+   * The device might need to be reset.
+   * Even though the device was suspended, if its usage counter was > 0 then most
+-    likely it would need a run-time resume in the near future anyway.
++    likely it would need a runtime resume in the near future anyway.
+ If the device had been suspended before the system suspend began and it's
+-brought back to full power during resume, then its run-time PM status will have
++brought back to full power during resume, then its runtime PM status will have
+ to be updated to reflect the actual post-system sleep status.  The way to do
+ this is:
+@@ -593,9 +593,9 @@ this is:
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+-The PM core always increments the run-time usage counter before calling the
++The PM core always increments the runtime usage counter before calling the
+ ->suspend() callback and decrements it after calling the ->resume() callback.
+-Hence disabling run-time PM temporarily like this will not cause any runtime
++Hence disabling runtime PM temporarily like this will not cause any runtime
+ suspend attempts to be permanently lost.  If the usage count goes to zero
+ following the return of the ->resume() callback, the ->runtime_idle() callback
+ will be invoked as usual.
+@@ -710,8 +710,8 @@ the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its
+ dev_pm_ops structure pointer.
+ Device drivers that wish to use the same function as a system suspend, freeze,
+-poweroff and run-time suspend callback, and similarly for system resume, thaw,
+-restore, and run-time resume, can achieve this with the help of the
++poweroff and runtime suspend callback, and similarly for system resume, thaw,
++restore, and runtime resume, can achieve this with the help of the
+ UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its
+ last argument to NULL).
+@@ -721,7 +721,7 @@ Some "devices" are only logical sub-devices of their parent and cannot be
+ power-managed on their own.  (The prototype example is a USB interface.  Entire
+ USB devices can go into low-power mode or send wake-up requests, but neither is
+ possible for individual interfaces.)  The drivers for these devices have no
+-need of run-time PM callbacks; if the callbacks did exist, ->runtime_suspend()
++need of runtime PM callbacks; if the callbacks did exist, ->runtime_suspend()
+ and ->runtime_resume() would always return 0 without doing anything else and
+ ->runtime_idle() would always call pm_runtime_suspend().
+@@ -729,7 +729,7 @@ Subsystems can tell the PM core about these devices by calling
+ pm_runtime_no_callbacks().  This should be done after the device structure is
+ initialized and before it is registered (although after device registration is
+ also okay).  The routine will set the device's power.no_callbacks flag and
+-prevent the non-debugging run-time PM sysfs attributes from being created.
++prevent the non-debugging runtime PM sysfs attributes from being created.
+ When power.no_callbacks is set, the PM core will not invoke the
+ ->runtime_idle(), ->runtime_suspend(), or ->runtime_resume() callbacks.
+@@ -737,7 +737,7 @@ Instead it will assume that suspends and resumes always succeed and that idle
+ devices should be suspended.
+ As a consequence, the PM core will never directly inform the device's subsystem
+-or driver about run-time power changes.  Instead, the driver for the device's
++or driver about runtime power changes.  Instead, the driver for the device's
+ parent must take responsibility for telling the device's driver when the
+ parent's power state changes.
+@@ -748,13 +748,13 @@ A device should be put in a low-power state only when there's some reason to
+ think it will remain in that state for a substantial time.  A common heuristic
+ says that a device which hasn't been used for a while is liable to remain
+ unused; following this advice, drivers should not allow devices to be suspended
+-at run-time until they have been inactive for some minimum period.  Even when
++at runtime until they have been inactive for some minimum period.  Even when
+ the heuristic ends up being non-optimal, it will still prevent devices from
+ "bouncing" too rapidly between low-power and full-power states.
+ The term "autosuspend" is an historical remnant.  It doesn't mean that the
+ device is automatically suspended (the subsystem or driver still has to call
+-the appropriate PM routines); rather it means that run-time suspends will
++the appropriate PM routines); rather it means that runtime suspends will
+ automatically be delayed until the desired period of inactivity has elapsed.
+ Inactivity is determined based on the power.last_busy field.  Drivers should
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index ee99025..be7b982 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1,5 +1,5 @@
+ /*
+- * drivers/base/power/runtime.c - Helper functions for device run-time PM
++ * drivers/base/power/runtime.c - Helper functions for device runtime PM
+  *
+  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
+@@ -159,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
+  * @dev: Device to notify the bus type about.
+  * @rpmflags: Flag bits.
+  *
+- * Check if the device's run-time PM status allows it to be suspended.  If
++ * Check if the device's runtime PM status allows it to be suspended.  If
+  * another idle notification has been started earlier, return immediately.  If
+  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+  * run the ->runtime_idle() callback directly.
+@@ -267,11 +267,11 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+ }
+ /**
+- * rpm_suspend - Carry out run-time suspend of given device.
++ * rpm_suspend - Carry out runtime suspend of given device.
+  * @dev: Device to suspend.
+  * @rpmflags: Flag bits.
+  *
+- * Check if the device's run-time PM status allows it to be suspended.  If
++ * Check if the device's runtime PM status allows it to be suspended.  If
+  * another suspend has been started earlier, either return immediately or wait
+  * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
+  * pending idle notification.  If the RPM_ASYNC flag is set then queue a
+@@ -430,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ }
+ /**
+- * rpm_resume - Carry out run-time resume of given device.
++ * rpm_resume - Carry out runtime resume of given device.
+  * @dev: Device to resume.
+  * @rpmflags: Flag bits.
+  *
+- * Check if the device's run-time PM status allows it to be resumed.  Cancel
++ * Check if the device's runtime PM status allows it to be resumed.  Cancel
+  * any scheduled or pending requests.  If another resume has been started
+  * earlier, either return immediately or wait for it to finish, depending on the
+  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
+@@ -551,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+               spin_lock(&parent->power.lock);
+               /*
+-               * We can resume if the parent's run-time PM is disabled or it
++               * We can resume if the parent's runtime PM is disabled or it
+                * is set to ignore children.
+                */
+               if (!parent->power.disable_depth
+@@ -615,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ }
+ /**
+- * pm_runtime_work - Universal run-time PM work function.
++ * pm_runtime_work - Universal runtime PM work function.
+  * @work: Work structure used for scheduling the execution of this function.
+  *
+  * Use @work to get the device object the work is to be done for, determine what
+- * is to be done and execute the appropriate run-time PM function.
++ * is to be done and execute the appropriate runtime PM function.
+  */
+ static void pm_runtime_work(struct work_struct *work)
+ {
+@@ -718,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
+ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
+ /**
+- * __pm_runtime_idle - Entry point for run-time idle operations.
++ * __pm_runtime_idle - Entry point for runtime idle operations.
+  * @dev: Device to send idle notification for.
+  * @rpmflags: Flag bits.
+  *
+@@ -747,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
+ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+ /**
+- * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
++ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
+  * @dev: Device to suspend.
+  * @rpmflags: Flag bits.
+  *
+@@ -776,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
+ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
+ /**
+- * __pm_runtime_resume - Entry point for run-time resume operations.
++ * __pm_runtime_resume - Entry point for runtime resume operations.
+  * @dev: Device to resume.
+  * @rpmflags: Flag bits.
+  *
+@@ -802,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
+ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
+ /**
+- * __pm_runtime_set_status - Set run-time PM status of a device.
++ * __pm_runtime_set_status - Set runtime PM status of a device.
+  * @dev: Device to handle.
+- * @status: New run-time PM status of the device.
++ * @status: New runtime PM status of the device.
+  *
+- * If run-time PM of the device is disabled or its power.runtime_error field is
++ * If runtime PM of the device is disabled or its power.runtime_error field is
+  * different from zero, the status may be changed either to RPM_ACTIVE, or to
+  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
+  * However, if the device has a parent and the parent is not active, and the
+@@ -852,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
+               /*
+                * It is invalid to put an active child under a parent that is
+-               * not active, has run-time PM enabled and the
++               * not active, has runtime PM enabled and the
+                * 'power.ignore_children' flag unset.
+                */
+               if (!parent->power.disable_depth
+@@ -886,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
+  * @dev: Device to handle.
+  *
+  * Flush all pending requests for the device from pm_wq and wait for all
+- * run-time PM operations involving the device in progress to complete.
++ * runtime PM operations involving the device in progress to complete.
+  *
+  * Should be called under dev->power.lock with interrupts disabled.
+  */
+@@ -934,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev)
+  * Prevent the device from being suspended by incrementing its usage counter and
+  * if there's a pending resume request for the device, wake the device up.
+  * Next, make sure that all pending requests for the device have been flushed
+- * from pm_wq and wait for all run-time PM operations involving the device in
++ * from pm_wq and wait for all runtime PM operations involving the device in
+  * progress to complete.
+  *
+  * Return value:
+@@ -964,18 +964,18 @@ int pm_runtime_barrier(struct device *dev)
+ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
+ /**
+- * __pm_runtime_disable - Disable run-time PM of a device.
++ * __pm_runtime_disable - Disable runtime PM of a device.
+  * @dev: Device to handle.
+  * @check_resume: If set, check if there's a resume request for the device.
+  *
+  * Increment power.disable_depth for the device and if was zero previously,
+- * cancel all pending run-time PM requests for the device and wait for all
++ * cancel all pending runtime PM requests for the device and wait for all
+  * operations in progress to complete.  The device can be either active or
+- * suspended after its run-time PM has been disabled.
++ * suspended after its runtime PM has been disabled.
+  *
+  * If @check_resume is set and there's a resume request pending when
+  * __pm_runtime_disable() is called and power.disable_depth is zero, the
+- * function will wake up the device before disabling its run-time PM.
++ * function will wake up the device before disabling its runtime PM.
+  */
+ void __pm_runtime_disable(struct device *dev, bool check_resume)
+ {
+@@ -988,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
+       /*
+        * Wake up the device if there's a resume request pending, because that
+-       * means there probably is some I/O to process and disabling run-time PM
++       * means there probably is some I/O to process and disabling runtime PM
+        * shouldn't prevent the device from processing the I/O.
+        */
+       if (check_resume && dev->power.request_pending
+@@ -1013,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
+ EXPORT_SYMBOL_GPL(__pm_runtime_disable);
+ /**
+- * pm_runtime_enable - Enable run-time PM of a device.
++ * pm_runtime_enable - Enable runtime PM of a device.
+  * @dev: Device to handle.
+  */
+ void pm_runtime_enable(struct device *dev)
+@@ -1032,7 +1032,7 @@ void pm_runtime_enable(struct device *dev)
+ EXPORT_SYMBOL_GPL(pm_runtime_enable);
+ /**
+- * pm_runtime_forbid - Block run-time PM of a device.
++ * pm_runtime_forbid - Block runtime PM of a device.
+  * @dev: Device to handle.
+  *
+  * Increase the device's usage count and clear its power.runtime_auto flag,
+@@ -1055,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev)
+ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+ /**
+- * pm_runtime_allow - Unblock run-time PM of a device.
++ * pm_runtime_allow - Unblock runtime PM of a device.
+  * @dev: Device to handle.
+  *
+  * Decrease the device's usage count and set its power.runtime_auto flag.
+@@ -1076,12 +1076,12 @@ void pm_runtime_allow(struct device *dev)
+ EXPORT_SYMBOL_GPL(pm_runtime_allow);
+ /**
+- * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
++ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
+  * @dev: Device to handle.
+  *
+  * Set the power.no_callbacks flag, which tells the PM core that this
+- * device is power-managed through its parent and has no run-time PM
+- * callbacks of its own.  The run-time sysfs attributes will be removed.
++ * device is power-managed through its parent and has no runtime PM
++ * callbacks of its own.  The runtime sysfs attributes will be removed.
+  */
+ void pm_runtime_no_callbacks(struct device *dev)
+ {
+@@ -1157,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+  * @delay: Value of the new delay in milliseconds.
+  *
+  * Set the device's power.autosuspend_delay value.  If it changes to negative
+- * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
+- * changes the other way, allow run-time suspends.
++ * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
++ * changes the other way, allow runtime suspends.
+  */
+ void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+ {
+@@ -1178,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+  * @dev: Device to handle.
+  * @use: New value for use_autosuspend.
+  *
+- * Set the device's power.use_autosuspend flag, and allow or prevent run-time
++ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
+  * suspends as needed.
+  */
+ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+@@ -1195,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+ EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
+ /**
+- * pm_runtime_init - Initialize run-time PM fields in given device object.
++ * pm_runtime_init - Initialize runtime PM fields in given device object.
+  * @dev: Device object to initialize.
+  */
+ void pm_runtime_init(struct device *dev)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0016-PM-Runtime-Prevent-runtime_resume-from-racing-with-p.patch b/patches.runtime_pm/0016-PM-Runtime-Prevent-runtime_resume-from-racing-with-p.patch
new file mode 100644 (file)
index 0000000..b70b4e5
--- /dev/null
@@ -0,0 +1,53 @@
+From 296795de8932876abe22dfe91d6689257a1a9731 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Wed, 6 Jul 2011 10:52:23 +0200
+Subject: PM / Runtime: Prevent runtime_resume from racing with probe
+
+This patch (as1475) adds device_lock() and device_unlock() calls to
+the store methods for the power/control and power/autosuspend_delay_ms
+sysfs attribute files.  We don't want badly timed writes to these
+files to cause runtime_resume callbacks to occur while a driver is
+being probed for a device.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 69c843b45eb3b8f267019e6a05860c9c48337419)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/sysfs.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index a9f5b89..942d6a7 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
+       cp = memchr(buf, '\n', n);
+       if (cp)
+               len = cp - buf;
++      device_lock(dev);
+       if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+               pm_runtime_allow(dev);
+       else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+               pm_runtime_forbid(dev);
+       else
+-              return -EINVAL;
++              n = -EINVAL;
++      device_unlock(dev);
+       return n;
+ }
+@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
+       if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
+               return -EINVAL;
++      device_lock(dev);
+       pm_runtime_set_autosuspend_delay(dev, delay);
++      device_unlock(dev);
+       return n;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0017-PM-Runtime-Consistent-utilization-of-deferred_resume.patch b/patches.runtime_pm/0017-PM-Runtime-Consistent-utilization-of-deferred_resume.patch
new file mode 100644 (file)
index 0000000..25c8b14
--- /dev/null
@@ -0,0 +1,32 @@
+From f7c12d8518c821f17f791e623ba45c9076cba674 Mon Sep 17 00:00:00 2001
+From: ShuoX Liu <shuox.liu@intel.com>
+Date: Fri, 8 Jul 2011 20:53:55 +0200
+Subject: PM / Runtime: Consistent utilization of deferred_resume
+
+dev->power.deferred_resume is used as a bool typically, so change
+one assignment to false from 0, like other places.
+
+Signed-off-by: ShuoX Liu <shuox.liu@intel.com>
+(cherry picked from commit 2cffff1281a74714c9e035322077ec52ffb1f838)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index be7b982..8dc247c 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -389,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       retval = rpm_callback(callback, dev);
+       if (retval) {
+               __update_runtime_status(dev, RPM_ACTIVE);
+-              dev->power.deferred_resume = 0;
++              dev->power.deferred_resume = false;
+               if (retval == -EAGAIN || retval == -EBUSY)
+                       dev->power.runtime_error = 0;
+               else
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0018-PM-Domains-Export-pm_genpd_poweron-in-header.patch b/patches.runtime_pm/0018-PM-Domains-Export-pm_genpd_poweron-in-header.patch
new file mode 100644 (file)
index 0000000..31d191c
--- /dev/null
@@ -0,0 +1,56 @@
+From 86d13a8eb669bac82af250084920075bc1238fd4 Mon Sep 17 00:00:00 2001
+From: Magnus Damm <damm@opensource.se>
+Date: Sun, 10 Jul 2011 10:39:14 +0200
+Subject: PM / Domains: Export pm_genpd_poweron() in header
+
+Allow SoC-specific code to call pm_genpd_poweron().
+
+Signed-off-by: Magnus Damm <damm@opensource.se>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 18b4f3f5d058b590e7189027eeb5d897742ade0a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    2 +-
+ include/linux/pm_domain.h   |    5 +++++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 1aed94c..1f1a7d8 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -37,7 +37,7 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+  * Restore power to @genpd and all of its parents so that it is possible to
+  * resume a device belonging to it.
+  */
+-static int pm_genpd_poweron(struct generic_pm_domain *genpd)
++int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
+       int ret = 0;
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 98491ee..14fb095 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -63,6 +63,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+                                    struct generic_pm_domain *target);
+ extern void pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
++extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+ #else
+ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+@@ -86,6 +87,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ }
+ static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+                                struct dev_power_governor *gov, bool is_off) {}
++static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
++{
++      return -ENOSYS;
++}
+ #endif
+ #endif /* _LINUX_PM_DOMAIN_H */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0019-doc-Konfig-Documentation-power-pm-apm-acpi-.txt.patch b/patches.runtime_pm/0019-doc-Konfig-Documentation-power-pm-apm-acpi-.txt.patch
new file mode 100644 (file)
index 0000000..3c5a407
--- /dev/null
@@ -0,0 +1,48 @@
+From 01c53bc76768055e6ef19c0fc0a6e8ab27964d46 Mon Sep 17 00:00:00 2001
+From: Michael Witten <mfwitten@gmail.com>
+Date: Fri, 8 Jul 2011 21:11:16 +0000
+Subject: doc: Konfig: Documentation/power/{pm => apm-acpi}.txt
+
+Signed-off-by: Michael Witten <mfwitten@gmail.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+(cherry picked from commit 2dc98fd3206f8106520eced769781a21a20707ca)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ arch/x86/Kconfig     |    4 ++--
+ kernel/power/Kconfig |    4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 37357a5..baae844 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1737,8 +1737,8 @@ menuconfig APM
+         machines with more than one CPU.
+         In order to use APM, you will need supporting software. For location
+-        and more information, read <file:Documentation/power/pm.txt> and the
+-        Battery Powered Linux mini-HOWTO, available from
++        and more information, read <file:Documentation/power/apm-acpi.txt>
++        and the Battery Powered Linux mini-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+         This driver does not spin down disk drives (see the hdparm(8)
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index 7b856b3..b1914cb9 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -193,8 +193,8 @@ config APM_EMULATION
+         notification of APM "events" (e.g. battery status change).
+         In order to use APM, you will need supporting software. For location
+-        and more information, read <file:Documentation/power/pm.txt> and the
+-        Battery Powered Linux mini-HOWTO, available from
++        and more information, read <file:Documentation/power/apm-acpi.txt>
++        and the Battery Powered Linux mini-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+         This driver does not spin down disk drives (see the hdparm(8)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0020-PM-Domains-Set-device-state-to-active-during-system-.patch b/patches.runtime_pm/0020-PM-Domains-Set-device-state-to-active-during-system-.patch
new file mode 100644 (file)
index 0000000..a8ee15c
--- /dev/null
@@ -0,0 +1,36 @@
+From 40a34966bff83f0ba043ea289407ca00e2e935a6 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:39:10 +0200
+Subject: PM / Domains: Set device state to "active" during system resume
+
+The runtime PM status of devices in a power domain that is not
+powered off in pm_genpd_complete() should be set to "active", because
+those devices are operational at this point.  Some of them may not be
+in use, though, so make pm_genpd_complete() call pm_runtime_idle()
+in addition to pm_runtime_set_active() for each of them.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 6f00ff78278fd5d6ac110b6903ee042af2d6af91)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 1f1a7d8..0e7e91b 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -786,7 +786,9 @@ static void pm_genpd_complete(struct device *dev)
+       if (run_complete) {
+               pm_generic_complete(dev);
++              pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
++              pm_runtime_idle(dev);
+       }
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0021-PM-Domains-Make-failing-pm_genpd_prepare-clean-up-pr.patch b/patches.runtime_pm/0021-PM-Domains-Make-failing-pm_genpd_prepare-clean-up-pr.patch
new file mode 100644 (file)
index 0000000..f83b600
--- /dev/null
@@ -0,0 +1,53 @@
+From 4b6c811b11eed8ee20d2593a074b1fa689de2650 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:39:21 +0200
+Subject: PM / Domains: Make failing pm_genpd_prepare() clean up properly
+
+If pm_generic_prepare() in pm_genpd_prepare() returns error code,
+the PM domains counter of "prepared" devices should be decremented
+and its suspend_power_off flag should be reset if this counter drops
+down to zero.  Otherwise, the PM domain runtime PM code will not
+handle the domain correctly (it will permanently think that system
+suspend is in progress).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit b6c10c84665912985d0bf9b6ae8ce19fc4298d9f)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 0e7e91b..9a20d93 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -367,6 +367,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+ static int pm_genpd_prepare(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
++      int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -400,7 +401,16 @@ static int pm_genpd_prepare(struct device *dev)
+       mutex_unlock(&genpd->lock);
+-      return pm_generic_prepare(dev);
++      ret = pm_generic_prepare(dev);
++      if (ret) {
++              mutex_lock(&genpd->lock);
++
++              if (--genpd->prepared_count == 0)
++                      genpd->suspend_power_off = false;
++
++              mutex_unlock(&genpd->lock);
++      }
++      return ret;
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0022-PM-Domains-Do-not-execute-device-callbacks-under-loc.patch b/patches.runtime_pm/0022-PM-Domains-Do-not-execute-device-callbacks-under-loc.patch
new file mode 100644 (file)
index 0000000..df43969
--- /dev/null
@@ -0,0 +1,604 @@
+From 3584f0d5ebf424566f61039e5d023ba0a9c00b24 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:39:29 +0200
+Subject: PM / Domains: Do not execute device callbacks under locks
+
+Currently, the .start_device() and .stop_device() callbacks from
+struct generic_pm_domain() as well as the device drivers' runtime PM
+callbacks used by the generic PM domains code are executed under
+the generic PM domain lock.  This, unfortunately, is prone to
+deadlocks, for example if a device and its parent are boths members
+of the same PM domain.  For this reason, it would be better if the
+PM domains code didn't execute device callbacks under the lock.
+
+Rework the locking in the generic PM domains code so that the lock
+is dropped for the execution of device callbacks.  To this end,
+introduce PM domains states reflecting the current status of a PM
+domain and such that the PM domain lock cannot be acquired if the
+status is GPD_STATE_BUSY.  Make threads attempting to acquire a PM
+domain's lock wait until the status changes to either
+GPD_STATE_ACTIVE or GPD_STATE_POWER_OFF.
+
+This change by itself doesn't fix the deadlock problem mentioned
+above, but the mechanism introduced by it will be used for for this
+purpose by a subsequent patch.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 17b75eca7683d4942f4d8d00563fd15f37c39589)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  249 +++++++++++++++++++++++++++++++------------
+ include/linux/pm_domain.h   |   10 +-
+ 2 files changed, 185 insertions(+), 74 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 9a20d93..d06f3bb 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -13,6 +13,8 @@
+ #include <linux/pm_domain.h>
+ #include <linux/slab.h>
+ #include <linux/err.h>
++#include <linux/sched.h>
++#include <linux/suspend.h>
+ #ifdef CONFIG_PM
+@@ -30,6 +32,34 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+                       genpd->sd_count--;
+ }
++static void genpd_acquire_lock(struct generic_pm_domain *genpd)
++{
++      DEFINE_WAIT(wait);
++
++      mutex_lock(&genpd->lock);
++      /*
++       * Wait for the domain to transition into either the active,
++       * or the power off state.
++       */
++      for (;;) {
++              prepare_to_wait(&genpd->status_wait_queue, &wait,
++                              TASK_UNINTERRUPTIBLE);
++              if (genpd->status != GPD_STATE_BUSY)
++                      break;
++              mutex_unlock(&genpd->lock);
++
++              schedule();
++
++              mutex_lock(&genpd->lock);
++      }
++      finish_wait(&genpd->status_wait_queue, &wait);
++}
++
++static void genpd_release_lock(struct generic_pm_domain *genpd)
++{
++      mutex_unlock(&genpd->lock);
++}
++
+ /**
+  * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+  * @genpd: PM domain to power up.
+@@ -39,22 +69,50 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+  */
+ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
++      struct generic_pm_domain *parent = genpd->parent;
++      DEFINE_WAIT(wait);
+       int ret = 0;
+  start:
+-      if (genpd->parent)
+-              mutex_lock(&genpd->parent->lock);
+-      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++      if (parent) {
++              mutex_lock(&parent->lock);
++              mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++      } else {
++              mutex_lock(&genpd->lock);
++      }
++      /*
++       * Wait for the domain to transition into either the active,
++       * or the power off state.
++       */
++      for (;;) {
++              prepare_to_wait(&genpd->status_wait_queue, &wait,
++                              TASK_UNINTERRUPTIBLE);
++              if (genpd->status != GPD_STATE_BUSY)
++                      break;
++              mutex_unlock(&genpd->lock);
++              if (parent)
++                      mutex_unlock(&parent->lock);
++
++              schedule();
+-      if (!genpd->power_is_off
++              if (parent) {
++                      mutex_lock(&parent->lock);
++                      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++              } else {
++                      mutex_lock(&genpd->lock);
++              }
++      }
++      finish_wait(&genpd->status_wait_queue, &wait);
++
++      if (genpd->status == GPD_STATE_ACTIVE
+           || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+               goto out;
+-      if (genpd->parent && genpd->parent->power_is_off) {
++      if (parent && parent->status != GPD_STATE_ACTIVE) {
+               mutex_unlock(&genpd->lock);
+-              mutex_unlock(&genpd->parent->lock);
++              mutex_unlock(&parent->lock);
+-              ret = pm_genpd_poweron(genpd->parent);
++              ret = pm_genpd_poweron(parent);
+               if (ret)
+                       return ret;
+@@ -67,14 +125,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+                       goto out;
+       }
+-      genpd->power_is_off = false;
+-      if (genpd->parent)
+-              genpd->parent->sd_count++;
++      genpd->status = GPD_STATE_ACTIVE;
++      if (parent)
++              parent->sd_count++;
+  out:
+       mutex_unlock(&genpd->lock);
+-      if (genpd->parent)
+-              mutex_unlock(&genpd->parent->lock);
++      if (parent)
++              mutex_unlock(&parent->lock);
+       return ret;
+ }
+@@ -90,6 +148,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+  */
+ static int __pm_genpd_save_device(struct dev_list_entry *dle,
+                                 struct generic_pm_domain *genpd)
++      __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+       struct device *dev = dle->dev;
+       struct device_driver *drv = dev->driver;
+@@ -98,6 +157,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
+       if (dle->need_restore)
+               return 0;
++      mutex_unlock(&genpd->lock);
++
+       if (drv && drv->pm && drv->pm->runtime_suspend) {
+               if (genpd->start_device)
+                       genpd->start_device(dev);
+@@ -108,6 +169,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
+                       genpd->stop_device(dev);
+       }
++      mutex_lock(&genpd->lock);
++
+       if (!ret)
+               dle->need_restore = true;
+@@ -121,6 +184,7 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
+  */
+ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+                                     struct generic_pm_domain *genpd)
++      __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+       struct device *dev = dle->dev;
+       struct device_driver *drv = dev->driver;
+@@ -128,6 +192,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+       if (!dle->need_restore)
+               return;
++      mutex_unlock(&genpd->lock);
++
+       if (drv && drv->pm && drv->pm->runtime_resume) {
+               if (genpd->start_device)
+                       genpd->start_device(dev);
+@@ -138,6 +204,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+                       genpd->stop_device(dev);
+       }
++      mutex_lock(&genpd->lock);
++
+       dle->need_restore = false;
+ }
+@@ -150,13 +218,14 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+  * the @genpd's devices' drivers and remove power from @genpd.
+  */
+ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
++      __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+       struct generic_pm_domain *parent;
+       struct dev_list_entry *dle;
+       unsigned int not_suspended;
+       int ret;
+-      if (genpd->power_is_off || genpd->prepared_count > 0)
++      if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0)
+               return 0;
+       if (genpd->sd_count > 0)
+@@ -175,22 +244,36 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+                       return -EAGAIN;
+       }
++      genpd->status = GPD_STATE_BUSY;
++
+       list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+               ret = __pm_genpd_save_device(dle, genpd);
+               if (ret)
+                       goto err_dev;
+       }
++      mutex_unlock(&genpd->lock);
++
++      parent = genpd->parent;
++      if (parent) {
++              genpd_acquire_lock(parent);
++              mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++      } else {
++              mutex_lock(&genpd->lock);
++      }
++
+       if (genpd->power_off)
+               genpd->power_off(genpd);
+-      genpd->power_is_off = true;
++      genpd->status = GPD_STATE_POWER_OFF;
++      wake_up_all(&genpd->status_wait_queue);
+-      parent = genpd->parent;
+       if (parent) {
+               genpd_sd_counter_dec(parent);
+               if (parent->sd_count == 0)
+                       queue_work(pm_wq, &parent->power_off_work);
++
++              genpd_release_lock(parent);
+       }
+       return 0;
+@@ -199,6 +282,9 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       list_for_each_entry_continue(dle, &genpd->dev_list, node)
+               __pm_genpd_restore_device(dle, genpd);
++      genpd->status = GPD_STATE_ACTIVE;
++      wake_up_all(&genpd->status_wait_queue);
++
+       return ret;
+ }
+@@ -212,13 +298,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
+       genpd = container_of(work, struct generic_pm_domain, power_off_work);
+-      if (genpd->parent)
+-              mutex_lock(&genpd->parent->lock);
+-      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
++      genpd_acquire_lock(genpd);
+       pm_genpd_poweroff(genpd);
+-      mutex_unlock(&genpd->lock);
+-      if (genpd->parent)
+-              mutex_unlock(&genpd->parent->lock);
++      genpd_release_lock(genpd);
+ }
+ /**
+@@ -239,23 +321,17 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->parent)
+-              mutex_lock(&genpd->parent->lock);
+-      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-
+       if (genpd->stop_device) {
+               int ret = genpd->stop_device(dev);
+               if (ret)
+-                      goto out;
++                      return ret;
+       }
++
++      genpd_acquire_lock(genpd);
+       genpd->in_progress++;
+       pm_genpd_poweroff(genpd);
+       genpd->in_progress--;
+-
+- out:
+-      mutex_unlock(&genpd->lock);
+-      if (genpd->parent)
+-              mutex_unlock(&genpd->parent->lock);
++      genpd_release_lock(genpd);
+       return 0;
+ }
+@@ -276,9 +352,6 @@ static void __pm_genpd_runtime_resume(struct device *dev,
+                       break;
+               }
+       }
+-
+-      if (genpd->start_device)
+-              genpd->start_device(dev);
+ }
+ /**
+@@ -304,9 +377,15 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       if (ret)
+               return ret;
+-      mutex_lock(&genpd->lock);
++      genpd_acquire_lock(genpd);
++      genpd->status = GPD_STATE_BUSY;
+       __pm_genpd_runtime_resume(dev, genpd);
+-      mutex_unlock(&genpd->lock);
++      genpd->status = GPD_STATE_ACTIVE;
++      wake_up_all(&genpd->status_wait_queue);
++      genpd_release_lock(genpd);
++
++      if (genpd->start_device)
++              genpd->start_device(dev);
+       return 0;
+ }
+@@ -339,7 +418,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+ {
+       struct generic_pm_domain *parent = genpd->parent;
+-      if (genpd->power_is_off)
++      if (genpd->status == GPD_STATE_POWER_OFF)
+               return;
+       if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+@@ -348,7 +427,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+       if (genpd->power_off)
+               genpd->power_off(genpd);
+-      genpd->power_is_off = true;
++      genpd->status = GPD_STATE_POWER_OFF;
+       if (parent) {
+               genpd_sd_counter_dec(parent);
+               pm_genpd_sync_poweroff(parent);
+@@ -375,32 +454,41 @@ static int pm_genpd_prepare(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      mutex_lock(&genpd->lock);
++      /*
++       * If a wakeup request is pending for the device, it should be woken up
++       * at this point and a system wakeup event should be reported if it's
++       * set up to wake up the system from sleep states.
++       */
++      pm_runtime_get_noresume(dev);
++      if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
++              pm_wakeup_event(dev, 0);
++
++      if (pm_wakeup_pending()) {
++              pm_runtime_put_sync(dev);
++              return -EBUSY;
++      }
++
++      genpd_acquire_lock(genpd);
+       if (genpd->prepared_count++ == 0)
+-              genpd->suspend_power_off = genpd->power_is_off;
++              genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
++
++      genpd_release_lock(genpd);
+       if (genpd->suspend_power_off) {
+-              mutex_unlock(&genpd->lock);
++              pm_runtime_put_noidle(dev);
+               return 0;
+       }
+       /*
+-       * If the device is in the (runtime) "suspended" state, call
+-       * .start_device() for it, if defined.
+-       */
+-      if (pm_runtime_suspended(dev))
+-              __pm_genpd_runtime_resume(dev, genpd);
+-
+-      /*
+-       * Do not check if runtime resume is pending at this point, because it
+-       * has been taken care of already and if pm_genpd_poweron() ran at this
+-       * point as a result of the check, it would deadlock.
++       * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
++       * so pm_genpd_poweron() will return immediately, but if the device
++       * is suspended (e.g. it's been stopped by .stop_device()), we need
++       * to make it operational.
+        */
++      pm_runtime_resume(dev);
+       __pm_runtime_disable(dev, false);
+-      mutex_unlock(&genpd->lock);
+-
+       ret = pm_generic_prepare(dev);
+       if (ret) {
+               mutex_lock(&genpd->lock);
+@@ -409,7 +497,10 @@ static int pm_genpd_prepare(struct device *dev)
+                       genpd->suspend_power_off = false;
+               mutex_unlock(&genpd->lock);
++              pm_runtime_enable(dev);
+       }
++
++      pm_runtime_put_sync(dev);
+       return ret;
+ }
+@@ -726,7 +817,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
+        * guaranteed that this function will never run twice in parallel for
+        * the same PM domain, so it is not necessary to use locking here.
+        */
+-      genpd->power_is_off = true;
++      genpd->status = GPD_STATE_POWER_OFF;
+       if (genpd->suspend_power_off) {
+               /*
+                * The boot kernel might put the domain into the power on state,
+@@ -836,9 +927,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+               return -EINVAL;
+-      mutex_lock(&genpd->lock);
++      genpd_acquire_lock(genpd);
+-      if (genpd->power_is_off) {
++      if (genpd->status == GPD_STATE_POWER_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+@@ -870,7 +961,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+       spin_unlock_irq(&dev->power.lock);
+  out:
+-      mutex_unlock(&genpd->lock);
++      genpd_release_lock(genpd);
+       return ret;
+ }
+@@ -891,7 +982,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+               return -EINVAL;
+-      mutex_lock(&genpd->lock);
++      genpd_acquire_lock(genpd);
+       if (genpd->prepared_count > 0) {
+               ret = -EAGAIN;
+@@ -915,7 +1006,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+       }
+  out:
+-      mutex_unlock(&genpd->lock);
++      genpd_release_lock(genpd);
+       return ret;
+ }
+@@ -934,9 +1025,19 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+               return -EINVAL;
+-      mutex_lock(&genpd->lock);
++ start:
++      genpd_acquire_lock(genpd);
++      mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+-      if (genpd->power_is_off && !new_subdomain->power_is_off) {
++      if (new_subdomain->status != GPD_STATE_POWER_OFF
++          && new_subdomain->status != GPD_STATE_ACTIVE) {
++              mutex_unlock(&new_subdomain->lock);
++              genpd_release_lock(genpd);
++              goto start;
++      }
++
++      if (genpd->status == GPD_STATE_POWER_OFF
++          &&  new_subdomain->status != GPD_STATE_POWER_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+@@ -948,17 +1049,14 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+               }
+       }
+-      mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+-
+       list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+       new_subdomain->parent = genpd;
+-      if (!subdomain->power_is_off)
++      if (subdomain->status != GPD_STATE_POWER_OFF)
+               genpd->sd_count++;
+-      mutex_unlock(&new_subdomain->lock);
+-
+  out:
+-      mutex_unlock(&genpd->lock);
++      mutex_unlock(&new_subdomain->lock);
++      genpd_release_lock(genpd);
+       return ret;
+ }
+@@ -977,7 +1075,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+               return -EINVAL;
+-      mutex_lock(&genpd->lock);
++ start:
++      genpd_acquire_lock(genpd);
+       list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+               if (subdomain != target)
+@@ -985,9 +1084,16 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+               mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
++              if (subdomain->status != GPD_STATE_POWER_OFF
++                  && subdomain->status != GPD_STATE_ACTIVE) {
++                      mutex_unlock(&subdomain->lock);
++                      genpd_release_lock(genpd);
++                      goto start;
++              }
++
+               list_del(&subdomain->sd_node);
+               subdomain->parent = NULL;
+-              if (!subdomain->power_is_off)
++              if (subdomain->status != GPD_STATE_POWER_OFF)
+                       genpd_sd_counter_dec(genpd);
+               mutex_unlock(&subdomain->lock);
+@@ -996,7 +1102,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+               break;
+       }
+-      mutex_unlock(&genpd->lock);
++      genpd_release_lock(genpd);
+       return ret;
+ }
+@@ -1022,7 +1128,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+       genpd->in_progress = 0;
+       genpd->sd_count = 0;
+-      genpd->power_is_off = is_off;
++      genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
++      init_waitqueue_head(&genpd->status_wait_queue);
+       genpd->device_count = 0;
+       genpd->suspended_count = 0;
+       genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 14fb095..c71457c 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -11,8 +11,11 @@
+ #include <linux/device.h>
+-#define GPD_IN_SUSPEND        1
+-#define GPD_POWER_OFF 2
++enum gpd_status {
++      GPD_STATE_ACTIVE = 0,   /* PM domain is active */
++      GPD_STATE_BUSY,         /* Something is happening to the PM domain */
++      GPD_STATE_POWER_OFF,    /* PM domain is off */
++};
+ struct dev_power_governor {
+       bool (*power_down_ok)(struct dev_pm_domain *domain);
+@@ -29,7 +32,8 @@ struct generic_pm_domain {
+       struct work_struct power_off_work;
+       unsigned int in_progress;       /* Number of devices being suspended now */
+       unsigned int sd_count;  /* Number of subdomains with power "on" */
+-      bool power_is_off;      /* Whether or not power has been removed */
++      enum gpd_status status; /* Current state of the domain */
++      wait_queue_head_t status_wait_queue;
+       unsigned int device_count;      /* Number of devices */
+       unsigned int suspended_count;   /* System suspend device counter */
+       unsigned int prepared_count;    /* Suspend counter of prepared devices */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0023-PM-Domains-Allow-callbacks-to-execute-all-runtime-PM.patch b/patches.runtime_pm/0023-PM-Domains-Allow-callbacks-to-execute-all-runtime-PM.patch
new file mode 100644 (file)
index 0000000..1df7f0c
--- /dev/null
@@ -0,0 +1,383 @@
+From 99ba4e79b8fde359d9c5fd24a699562cc6debade Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:39:36 +0200
+Subject: PM / Domains: Allow callbacks to execute all runtime PM helpers
+
+A deadlock may occur if one of the PM domains' .start_device() or
+.stop_device() callbacks or a device driver's .runtime_suspend() or
+.runtime_resume() callback executed by the core generic PM domain
+code uses a "wrong" runtime PM helper function.  This happens, for
+example, if .runtime_resume() from one device's driver calls
+pm_runtime_resume() for another device in the same PM domain.
+A similar situation may take place if a device's parent is in the
+same PM domain, in which case the runtime PM framework may execute
+pm_genpd_runtime_resume() automatically for the parent (if it is
+suspended at the moment).  This, of course, is undesirable, so
+the generic PM domains code should be modified to prevent it from
+happening.
+
+The runtime PM framework guarantees that pm_genpd_runtime_suspend()
+and pm_genpd_runtime_resume() won't be executed in parallel for
+the same device, so the generic PM domains code need not worry
+about those cases.  Still, it needs to prevent the other possible
+race conditions between pm_genpd_runtime_suspend(),
+pm_genpd_runtime_resume(), pm_genpd_poweron() and pm_genpd_poweroff()
+from happening and it needs to avoid deadlocks at the same time.
+To this end, modify the generic PM domains code to relax
+synchronization rules so that:
+
+* pm_genpd_poweron() doesn't wait for the PM domain status to
+  change from GPD_STATE_BUSY.  If it finds that the status is
+  not GPD_STATE_POWER_OFF, it returns without powering the domain on
+  (it may modify the status depending on the circumstances).
+
+* pm_genpd_poweroff() returns as soon as it finds that the PM
+  domain's status changed from GPD_STATE_BUSY after it's released
+  the PM domain's lock.
+
+* pm_genpd_runtime_suspend() doesn't wait for the PM domain status
+  to change from GPD_STATE_BUSY after executing the domain's
+  .stop_device() callback and executes pm_genpd_poweroff() only
+  if pm_genpd_runtime_resume() is not executed in parallel.
+
+* pm_genpd_runtime_resume() doesn't wait for the PM domain status
+  to change from GPD_STATE_BUSY after executing pm_genpd_poweron()
+  and sets the domain's status to GPD_STATE_BUSY and increments its
+  counter of resuming devices (introduced by this change) immediately
+  after acquiring the lock.  The counter of resuming devices is then
+  decremented after executing __pm_genpd_runtime_resume() for the
+  device and the domain's status is reset to GPD_STATE_ACTIVE (unless
+  there are more resuming devices in the domain, in which case the
+  status remains GPD_STATE_BUSY).
+
+This way, for example, if a device driver's .runtime_resume()
+callback executes pm_runtime_resume() for another device in the same
+PM domain, pm_genpd_poweron() called by pm_genpd_runtime_resume()
+invoked by the runtime PM framework will not block and it will see
+that there's nothing to do for it.  Next, the PM domain's lock will
+be acquired without waiting for its status to change from
+GPD_STATE_BUSY and the device driver's .runtime_resume() callback
+will be executed.  In turn, if pm_runtime_suspend() is executed by
+one device driver's .runtime_resume() callback for another device in
+the same PM domain, pm_genpd_poweroff() executed by
+pm_genpd_runtime_suspend() invoked by the runtime PM framework as a
+result will notice that one of the devices in the domain is being
+resumed, so it will return immediately.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c6d22b37263607ba5aeeb2e11169fa65caa29bee)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  144 +++++++++++++++++++++++++++++--------------
+ include/linux/pm_domain.h   |    3 +
+ 2 files changed, 102 insertions(+), 45 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index d06f3bb..7e6cc8a 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -44,7 +44,8 @@ static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+       for (;;) {
+               prepare_to_wait(&genpd->status_wait_queue, &wait,
+                               TASK_UNINTERRUPTIBLE);
+-              if (genpd->status != GPD_STATE_BUSY)
++              if (genpd->status == GPD_STATE_ACTIVE
++                  || genpd->status == GPD_STATE_POWER_OFF)
+                       break;
+               mutex_unlock(&genpd->lock);
+@@ -60,6 +61,12 @@ static void genpd_release_lock(struct generic_pm_domain *genpd)
+       mutex_unlock(&genpd->lock);
+ }
++static void genpd_set_active(struct generic_pm_domain *genpd)
++{
++      if (genpd->resume_count == 0)
++              genpd->status = GPD_STATE_ACTIVE;
++}
++
+ /**
+  * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+  * @genpd: PM domain to power up.
+@@ -75,42 +82,24 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+  start:
+       if (parent) {
+-              mutex_lock(&parent->lock);
++              genpd_acquire_lock(parent);
+               mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+       } else {
+               mutex_lock(&genpd->lock);
+       }
+-      /*
+-       * Wait for the domain to transition into either the active,
+-       * or the power off state.
+-       */
+-      for (;;) {
+-              prepare_to_wait(&genpd->status_wait_queue, &wait,
+-                              TASK_UNINTERRUPTIBLE);
+-              if (genpd->status != GPD_STATE_BUSY)
+-                      break;
+-              mutex_unlock(&genpd->lock);
+-              if (parent)
+-                      mutex_unlock(&parent->lock);
+-
+-              schedule();
+-
+-              if (parent) {
+-                      mutex_lock(&parent->lock);
+-                      mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-              } else {
+-                      mutex_lock(&genpd->lock);
+-              }
+-      }
+-      finish_wait(&genpd->status_wait_queue, &wait);
+       if (genpd->status == GPD_STATE_ACTIVE
+           || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+               goto out;
++      if (genpd->status != GPD_STATE_POWER_OFF) {
++              genpd_set_active(genpd);
++              goto out;
++      }
++
+       if (parent && parent->status != GPD_STATE_ACTIVE) {
+               mutex_unlock(&genpd->lock);
+-              mutex_unlock(&parent->lock);
++              genpd_release_lock(parent);
+               ret = pm_genpd_poweron(parent);
+               if (ret)
+@@ -125,14 +114,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+                       goto out;
+       }
+-      genpd->status = GPD_STATE_ACTIVE;
++      genpd_set_active(genpd);
+       if (parent)
+               parent->sd_count++;
+  out:
+       mutex_unlock(&genpd->lock);
+       if (parent)
+-              mutex_unlock(&parent->lock);
++              genpd_release_lock(parent);
+       return ret;
+ }
+@@ -210,6 +199,20 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+ }
+ /**
++ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
++ * @genpd: PM domain to check.
++ *
++ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
++ * a "power off" operation, which means that a "power on" has occured in the
++ * meantime, or if its resume_count field is different from zero, which means
++ * that one of its devices has been resumed in the meantime.
++ */
++static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
++{
++      return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
++}
++
++/**
+  * pm_genpd_poweroff - Remove power from a given PM domain.
+  * @genpd: PM domain to power down.
+  *
+@@ -223,9 +226,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       struct generic_pm_domain *parent;
+       struct dev_list_entry *dle;
+       unsigned int not_suspended;
+-      int ret;
++      int ret = 0;
+-      if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0)
++ start:
++      /*
++       * Do not try to power off the domain in the following situations:
++       * (1) The domain is already in the "power off" state.
++       * (2) System suspend is in progress.
++       * (3) One of the domain's devices is being resumed right now.
++       */
++      if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
++          || genpd->resume_count > 0)
+               return 0;
+       if (genpd->sd_count > 0)
+@@ -239,34 +250,54 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       if (not_suspended > genpd->in_progress)
+               return -EBUSY;
++      if (genpd->poweroff_task) {
++              /*
++               * Another instance of pm_genpd_poweroff() is executing
++               * callbacks, so tell it to start over and return.
++               */
++              genpd->status = GPD_STATE_REPEAT;
++              return 0;
++      }
++
+       if (genpd->gov && genpd->gov->power_down_ok) {
+               if (!genpd->gov->power_down_ok(&genpd->domain))
+                       return -EAGAIN;
+       }
+       genpd->status = GPD_STATE_BUSY;
++      genpd->poweroff_task = current;
+       list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+               ret = __pm_genpd_save_device(dle, genpd);
+               if (ret)
+                       goto err_dev;
+-      }
+-      mutex_unlock(&genpd->lock);
++              if (genpd_abort_poweroff(genpd))
++                      goto out;
++
++              if (genpd->status == GPD_STATE_REPEAT) {
++                      genpd->poweroff_task = NULL;
++                      goto start;
++              }
++      }
+       parent = genpd->parent;
+       if (parent) {
++              mutex_unlock(&genpd->lock);
++
+               genpd_acquire_lock(parent);
+               mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-      } else {
+-              mutex_lock(&genpd->lock);
++
++              if (genpd_abort_poweroff(genpd)) {
++                      genpd_release_lock(parent);
++                      goto out;
++              }
+       }
+       if (genpd->power_off)
+               genpd->power_off(genpd);
+       genpd->status = GPD_STATE_POWER_OFF;
+-      wake_up_all(&genpd->status_wait_queue);
+       if (parent) {
+               genpd_sd_counter_dec(parent);
+@@ -276,16 +307,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+               genpd_release_lock(parent);
+       }
+-      return 0;
++ out:
++      genpd->poweroff_task = NULL;
++      wake_up_all(&genpd->status_wait_queue);
++      return ret;
+  err_dev:
+       list_for_each_entry_continue(dle, &genpd->dev_list, node)
+               __pm_genpd_restore_device(dle, genpd);
+-      genpd->status = GPD_STATE_ACTIVE;
+-      wake_up_all(&genpd->status_wait_queue);
+-
+-      return ret;
++      genpd_set_active(genpd);
++      goto out;
+ }
+ /**
+@@ -327,11 +359,11 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+                       return ret;
+       }
+-      genpd_acquire_lock(genpd);
++      mutex_lock(&genpd->lock);
+       genpd->in_progress++;
+       pm_genpd_poweroff(genpd);
+       genpd->in_progress--;
+-      genpd_release_lock(genpd);
++      mutex_unlock(&genpd->lock);
+       return 0;
+ }
+@@ -365,6 +397,7 @@ static void __pm_genpd_runtime_resume(struct device *dev,
+ static int pm_genpd_runtime_resume(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
++      DEFINE_WAIT(wait);
+       int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -377,12 +410,31 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       if (ret)
+               return ret;
+-      genpd_acquire_lock(genpd);
++      mutex_lock(&genpd->lock);
+       genpd->status = GPD_STATE_BUSY;
++      genpd->resume_count++;
++      for (;;) {
++              prepare_to_wait(&genpd->status_wait_queue, &wait,
++                              TASK_UNINTERRUPTIBLE);
++              /*
++               * If current is the powering off task, we have been called
++               * reentrantly from one of the device callbacks, so we should
++               * not wait.
++               */
++              if (!genpd->poweroff_task || genpd->poweroff_task == current)
++                      break;
++              mutex_unlock(&genpd->lock);
++
++              schedule();
++
++              mutex_lock(&genpd->lock);
++      }
++      finish_wait(&genpd->status_wait_queue, &wait);
+       __pm_genpd_runtime_resume(dev, genpd);
+-      genpd->status = GPD_STATE_ACTIVE;
++      genpd->resume_count--;
++      genpd_set_active(genpd);
+       wake_up_all(&genpd->status_wait_queue);
+-      genpd_release_lock(genpd);
++      mutex_unlock(&genpd->lock);
+       if (genpd->start_device)
+               genpd->start_device(dev);
+@@ -1130,6 +1182,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->sd_count = 0;
+       genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+       init_waitqueue_head(&genpd->status_wait_queue);
++      genpd->poweroff_task = NULL;
++      genpd->resume_count = 0;
+       genpd->device_count = 0;
+       genpd->suspended_count = 0;
+       genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index c71457c..feb80af 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -14,6 +14,7 @@
+ enum gpd_status {
+       GPD_STATE_ACTIVE = 0,   /* PM domain is active */
+       GPD_STATE_BUSY,         /* Something is happening to the PM domain */
++      GPD_STATE_REPEAT,       /* Power off in progress, to be repeated */
+       GPD_STATE_POWER_OFF,    /* PM domain is off */
+ };
+@@ -34,6 +35,8 @@ struct generic_pm_domain {
+       unsigned int sd_count;  /* Number of subdomains with power "on" */
+       enum gpd_status status; /* Current state of the domain */
+       wait_queue_head_t status_wait_queue;
++      struct task_struct *poweroff_task;      /* Powering off task */
++      unsigned int resume_count;      /* Number of devices being resumed */
+       unsigned int device_count;      /* Number of devices */
+       unsigned int suspended_count;   /* System suspend device counter */
+       unsigned int prepared_count;    /* Suspend counter of prepared devices */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0024-PM-Domains-Do-not-restore-all-devices-on-power-off-e.patch b/patches.runtime_pm/0024-PM-Domains-Do-not-restore-all-devices-on-power-off-e.patch
new file mode 100644 (file)
index 0000000..41818ba
--- /dev/null
@@ -0,0 +1,52 @@
+From f796cffbe45a4c0743426d0b68ae7c47d73779d9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:39:48 +0200
+Subject: PM / Domains: Do not restore all devices on power off error
+
+Since every device in a PM domain has its own need_restore
+flag, which is set by __pm_genpd_save_device(), there's no need to
+walk the domain's device list and restore all devices on an error
+from one of the drivers' .runtime_suspend() callbacks.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 697a7f3727b53c7d4c927948bbe1f6afc4fabfde)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 7e6cc8a..7b20801 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -269,8 +269,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+               ret = __pm_genpd_save_device(dle, genpd);
+-              if (ret)
+-                      goto err_dev;
++              if (ret) {
++                      genpd_set_active(genpd);
++                      goto out;
++              }
+               if (genpd_abort_poweroff(genpd))
+                       goto out;
+@@ -311,13 +313,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       genpd->poweroff_task = NULL;
+       wake_up_all(&genpd->status_wait_queue);
+       return ret;
+-
+- err_dev:
+-      list_for_each_entry_continue(dle, &genpd->dev_list, node)
+-              __pm_genpd_restore_device(dle, genpd);
+-
+-      genpd_set_active(genpd);
+-      goto out;
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0025-PM-Domains-Improve-handling-of-wakeup-devices-during.patch b/patches.runtime_pm/0025-PM-Domains-Improve-handling-of-wakeup-devices-during.patch
new file mode 100644 (file)
index 0000000..90ad20e
--- /dev/null
@@ -0,0 +1,88 @@
+From cac6f0b0d91888b756691c97ac1bb4deca217f24 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:39:57 +0200
+Subject: PM / Domains: Improve handling of wakeup devices during system
+ suspend
+
+Kevin points out that if there's a device that can wake up the system
+from sleep states, but it doesn't generate wakeup signals by itself
+(they are generated on its behalf by other parts of the system) and
+it currently is not enabled to wake up the system (that is,
+device_may_wakeup() returns "false" for it), we may need to change
+its wakeup settings during system suspend (for example, the device
+might have been configured to signal remote wakeup from the system's
+working state, as needed by runtime PM).  Therefore the generic PM
+domains code should invoke the system suspend callbacks provided by
+the device's driver, which it doesn't do if the PM domain is powered
+off during the system suspend's "prepare" stage.  This is a valid
+point.  Moreover, this code also should make sure that system wakeup
+devices that are enabled to wake up the system from sleep states and
+have to remain active for this purpose are not suspended while the
+system is in a sleep state.
+
+To avoid the above issues, make the generic PM domains' .prepare()
+routine, pm_genpd_prepare(), force runtime resume of devices whose
+system wakeup settings may need to be changed during system suspend
+or that should remain active while the system is in a sleep state to
+be able to wake it up from that state.
+
+Reported-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 4ecd6e651dd25ebbf0cc53c68162c0ab08641725)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 7b20801..b6e29ff 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -482,6 +482,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+ }
+ /**
++ * resume_needed - Check whether to resume a device before system suspend.
++ * @dev: Device to check.
++ * @genpd: PM domain the device belongs to.
++ *
++ * There are two cases in which a device that can wake up the system from sleep
++ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
++ * to wake up the system and it has to remain active for this purpose while the
++ * system is in the sleep state and (2) if the device is not enabled to wake up
++ * the system from sleep states and it generally doesn't generate wakeup signals
++ * by itself (those signals are generated on its behalf by other parts of the
++ * system).  In the latter case it may be necessary to reconfigure the device's
++ * wakeup settings during system suspend, because it may have been set up to
++ * signal remote wakeup from the system's working state as needed by runtime PM.
++ * Return 'true' in either of the above cases.
++ */
++static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
++{
++      bool active_wakeup;
++
++      if (!device_can_wakeup(dev))
++              return false;
++
++      active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
++      return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
++}
++
++/**
+  * pm_genpd_prepare - Start power transition of a device in a PM domain.
+  * @dev: Device to start the transition of.
+  *
+@@ -515,6 +542,9 @@ static int pm_genpd_prepare(struct device *dev)
+               return -EBUSY;
+       }
++      if (resume_needed(dev, genpd))
++              pm_runtime_resume(dev);
++
+       genpd_acquire_lock(genpd);
+       if (genpd->prepared_count++ == 0)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0026-PM-Domains-Queue-up-power-off-work-only-if-it-is-not.patch b/patches.runtime_pm/0026-PM-Domains-Queue-up-power-off-work-only-if-it-is-not.patch
new file mode 100644 (file)
index 0000000..62a2454
--- /dev/null
@@ -0,0 +1,56 @@
+From ad6cdcbd34d790a790c55f44a04f36dd4bf01680 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 12 Jul 2011 00:40:03 +0200
+Subject: PM / Domains: Queue up power off work only if it is not pending
+
+In theory it is possible that pm_genpd_poweroff() for two different
+subdomains of the same parent domain will attempt to queue up the
+execution of pm_genpd_poweroff() for the parent twice in a row.  This
+would lead to unpleasant consequences, so prevent it from happening
+by checking if genpd->power_off_work is pending before attempting to
+queue it up.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 56375fd420f851944960bd53dbb08d674f4d9406)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index b6e29ff..c3e4e29 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -213,6 +213,19 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+ }
+ /**
++ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
++ * @genpd: PM domait to power off.
++ *
++ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
++ * before.
++ */
++static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
++{
++      if (!work_pending(&genpd->power_off_work))
++              queue_work(pm_wq, &genpd->power_off_work);
++}
++
++/**
+  * pm_genpd_poweroff - Remove power from a given PM domain.
+  * @genpd: PM domain to power down.
+  *
+@@ -304,7 +317,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       if (parent) {
+               genpd_sd_counter_dec(parent);
+               if (parent->sd_count == 0)
+-                      queue_work(pm_wq, &parent->power_off_work);
++                      genpd_queue_power_off_work(parent);
+               genpd_release_lock(parent);
+       }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0027-PM-Runtime-Add-new-helper-function-pm_runtime_status.patch b/patches.runtime_pm/0027-PM-Runtime-Add-new-helper-function-pm_runtime_status.patch
new file mode 100644 (file)
index 0000000..f7685eb
--- /dev/null
@@ -0,0 +1,63 @@
+From 58b2b76de232e544666cbc47c97d86a5c5ad99cf Mon Sep 17 00:00:00 2001
+From: Kevin Hilman <khilman@ti.com>
+Date: Tue, 12 Jul 2011 11:17:09 +0200
+Subject: PM / Runtime: Add new helper function: pm_runtime_status_suspended()
+
+This boolean function simply returns whether or not the runtime status
+of the device is 'suspended'.  Unlike pm_runtime_suspended(), this
+function returns the runtime status whether or not runtime PM for the
+device has been disabled or not.
+
+Also add entry to Documentation/power/runtime.txt
+
+Signed-off-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit f3393b62f157cc87f8d78247e97b87778dc077b8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |    3 +++
+ include/linux/pm_runtime.h         |    6 ++++++
+ 2 files changed, 9 insertions(+)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 40e47c7..14dd3c6 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -412,6 +412,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+     - return true if the device's runtime PM status is 'suspended' and its
+       'power.disable_depth' field is equal to zero, or false otherwise
++  bool pm_runtime_status_suspended(struct device *dev);
++    - return true if the device's runtime PM status is 'suspended'
++
+   void pm_runtime_allow(struct device *dev);
+     - set the power.runtime_auto flag for the device and decrease its usage
+       counter (used by the /sys/devices/.../power/control interface to
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index dfb8539..daac05d 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -82,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev)
+               && !dev->power.disable_depth;
+ }
++static inline bool pm_runtime_status_suspended(struct device *dev)
++{
++      return dev->power.runtime_status == RPM_SUSPENDED;
++}
++
+ static inline bool pm_runtime_enabled(struct device *dev)
+ {
+       return !dev->power.disable_depth;
+@@ -130,6 +135,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
+ static inline bool device_run_wake(struct device *dev) { return false; }
+ static inline void device_set_run_wake(struct device *dev, bool enable) {}
+ static inline bool pm_runtime_suspended(struct device *dev) { return false; }
++static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
+ static inline bool pm_runtime_enabled(struct device *dev) { return false; }
+ static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0028-PM-Domains-Introduce-function-to-power-off-all-unuse.patch b/patches.runtime_pm/0028-PM-Domains-Introduce-function-to-power-off-all-unuse.patch
new file mode 100644 (file)
index 0000000..db0921a
--- /dev/null
@@ -0,0 +1,88 @@
+From 9bb7e07116765263503741136578ab4eb1dbd246 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 13 Jul 2011 12:31:52 +0200
+Subject: PM / Domains: Introduce function to power off all unused PM domains
+
+Add a new function pm_genpd_poweroff_unused() queuing up the
+execution of pm_genpd_poweroff() for every initialized generic PM
+domain.  Calling it will cause every generic PM domain without
+devices in use to be powered off.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit 5125bbf3880755419eff68672623cde49c4f31e8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   21 +++++++++++++++++++++
+ include/linux/pm_domain.h   |    3 +++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index c3e4e29..c2c537d 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -16,6 +16,9 @@
+ #include <linux/sched.h>
+ #include <linux/suspend.h>
++static LIST_HEAD(gpd_list);
++static DEFINE_MUTEX(gpd_list_lock);
++
+ #ifdef CONFIG_PM
+ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+@@ -1241,4 +1244,22 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+       genpd->domain.ops.restore = pm_genpd_restore;
+       genpd->domain.ops.complete = pm_genpd_complete;
++      mutex_lock(&gpd_list_lock);
++      list_add(&genpd->gpd_list_node, &gpd_list);
++      mutex_unlock(&gpd_list_lock);
++}
++
++/**
++ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
++ */
++void pm_genpd_poweroff_unused(void)
++{
++      struct generic_pm_domain *genpd;
++
++      mutex_lock(&gpd_list_lock);
++
++      list_for_each_entry(genpd, &gpd_list, gpd_list_node)
++              genpd_queue_power_off_work(genpd);
++
++      mutex_unlock(&gpd_list_lock);
+ }
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index feb80af..3e4f3d3 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -24,6 +24,7 @@ struct dev_power_governor {
+ struct generic_pm_domain {
+       struct dev_pm_domain domain;    /* PM domain operations */
++      struct list_head gpd_list_node; /* Node in the global PM domains list */
+       struct list_head sd_node;       /* Node in the parent's subdomain list */
+       struct generic_pm_domain *parent;       /* Parent PM domain */
+       struct list_head sd_list;       /* List of dubdomains */
+@@ -71,6 +72,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ extern void pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
+ extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
++extern void pm_genpd_poweroff_unused(void);
+ #else
+ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+@@ -98,6 +100,7 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
+       return -ENOSYS;
+ }
++static inline void pm_genpd_poweroff_unused(void) {}
+ #endif
+ #endif /* _LINUX_PM_DOMAIN_H */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0029-ARM-shmobile-Use-genpd_queue_power_off_work.patch b/patches.runtime_pm/0029-ARM-shmobile-Use-genpd_queue_power_off_work.patch
new file mode 100644 (file)
index 0000000..41c2c0c
--- /dev/null
@@ -0,0 +1,59 @@
+From d68d1d65868aadd10c580d02b365a7c01ee01a3b Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 14 Jul 2011 20:59:07 +0200
+Subject: ARM / shmobile: Use genpd_queue_power_off_work()
+
+Make pd_power_down_a3rv() use genpd_queue_power_off_work() to queue
+up the powering off of the A4LC domain to avoid queuing it up when
+it is pending.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit 0bc5b2debb832191a42baea7ff59d2ca6ce9f7d5)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    2 +-
+ include/linux/pm_domain.h   |    2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index c2c537d..00ed4f3 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -222,7 +222,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+  * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+  * before.
+  */
+-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
++void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+ {
+       if (!work_pending(&genpd->power_off_work))
+               queue_work(pm_wq, &genpd->power_off_work);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 3e4f3d3..21097cb 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -73,6 +73,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
+ extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+ extern void pm_genpd_poweroff_unused(void);
++extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
+ #else
+ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+@@ -101,6 +102,7 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+       return -ENOSYS;
+ }
+ static inline void pm_genpd_poweroff_unused(void) {}
++static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
+ #endif
+ #endif /* _LINUX_PM_DOMAIN_H */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0030-PM-Domains-Take-.power_off-error-code-into-account.patch b/patches.runtime_pm/0030-PM-Domains-Take-.power_off-error-code-into-account.patch
new file mode 100644 (file)
index 0000000..21441cb
--- /dev/null
@@ -0,0 +1,47 @@
+From 13482707dcaba63735aff5a7479ae36fc9abf60f Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 14 Jul 2011 20:59:20 +0200
+Subject: PM / Domains: Take .power_off() error code into account
+
+Currently pm_genpd_poweroff() discards error codes returned by
+the PM domain's .power_off() callback, because it's safer to always
+regard the domain as inaccessible to drivers after a failing
+.power_off().  Still, there are situations in which the low-level
+code may want to indicate that it doesn't want to power off the
+domain, so allow it to do that by returning -EBUSY from .power_off().
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit d28054020f97c7c9f15327a53945f0f40ffc5d7a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 00ed4f3..be8714a 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -312,8 +312,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+               }
+       }
+-      if (genpd->power_off)
+-              genpd->power_off(genpd);
++      if (genpd->power_off) {
++              ret = genpd->power_off(genpd);
++              if (ret == -EBUSY) {
++                      genpd_set_active(genpd);
++                      if (parent)
++                              genpd_release_lock(parent);
++
++                      goto out;
++              }
++      }
+       genpd->status = GPD_STATE_POWER_OFF;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0031-PM-OPP-Introduce-function-to-free-cpufreq-table.patch b/patches.runtime_pm/0031-PM-OPP-Introduce-function-to-free-cpufreq-table.patch
new file mode 100644 (file)
index 0000000..999c40f
--- /dev/null
@@ -0,0 +1,88 @@
+From 978454db90df87db0d7389550fac6b7417cc28ec Mon Sep 17 00:00:00 2001
+From: Nishanth Menon <nm@ti.com>
+Date: Fri, 10 Jun 2011 20:24:57 +0200
+Subject: PM / OPP: Introduce function to free cpufreq table
+
+cpufreq table allocated by opp_init_cpufreq_table is better
+freed by OPP layer itself. This allows future modifications to
+the table handling to be transparent to the users.
+
+Signed-off-by: Nishanth Menon <nm@ti.com>
+Acked-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 99f381d3549432a250fe846a2a82d61a032804b0)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/opp.txt |    2 ++
+ drivers/base/power/opp.c    |   17 +++++++++++++++++
+ include/linux/opp.h         |    8 ++++++++
+ 3 files changed, 27 insertions(+)
+
+diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
+index 5ae70a12..3035d00 100644
+--- a/Documentation/power/opp.txt
++++ b/Documentation/power/opp.txt
+@@ -321,6 +321,8 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
+       addition to CONFIG_PM as power management feature is required to
+       dynamically scale voltage and frequency in a system.
++opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
++
+ 7. Data Structures
+ ==================
+ Typically an SoC contains multiple voltage domains which are variable. Each
+diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
+index 56a6899..5cc1232 100644
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
+       return 0;
+ }
++
++/**
++ * opp_free_cpufreq_table() - free the cpufreq table
++ * @dev:      device for which we do this operation
++ * @table:    table to free
++ *
++ * Free up the table allocated by opp_init_cpufreq_table
++ */
++void opp_free_cpufreq_table(struct device *dev,
++                              struct cpufreq_frequency_table **table)
++{
++      if (!table)
++              return;
++
++      kfree(*table);
++      *table = NULL;
++}
+ #endif                /* CONFIG_CPU_FREQ */
+diff --git a/include/linux/opp.h b/include/linux/opp.h
+index 5449945..7020e97 100644
+--- a/include/linux/opp.h
++++ b/include/linux/opp.h
+@@ -94,12 +94,20 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
+ #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+ int opp_init_cpufreq_table(struct device *dev,
+                           struct cpufreq_frequency_table **table);
++void opp_free_cpufreq_table(struct device *dev,
++                              struct cpufreq_frequency_table **table);
+ #else
+ static inline int opp_init_cpufreq_table(struct device *dev,
+                           struct cpufreq_frequency_table **table)
+ {
+       return -EINVAL;
+ }
++
++static inline
++void opp_free_cpufreq_table(struct device *dev,
++                              struct cpufreq_frequency_table **table)
++{
++}
+ #endif                /* CONFIG_CPU_FREQ */
+ #endif                /* __LINUX_OPP_H__ */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0032-PM-Suspend-Add-.suspend_again-callback-to-suspend_op.patch b/patches.runtime_pm/0032-PM-Suspend-Add-.suspend_again-callback-to-suspend_op.patch
new file mode 100644 (file)
index 0000000..09e0e11
--- /dev/null
@@ -0,0 +1,151 @@
+From 80726e611ac09d6a07c375471d96d69cf50e1cef Mon Sep 17 00:00:00 2001
+From: MyungJoo Ham <myungjoo.ham@samsung.com>
+Date: Sun, 12 Jun 2011 15:57:05 +0200
+Subject: PM / Suspend: Add .suspend_again() callback to suspend_ops
+
+A system or a device may need to control suspend/wakeup events. It may
+want to wakeup the system after a predefined amount of time or at a
+predefined event decided while entering suspend for polling or delayed
+work. Then, it may want to enter suspend again if its predefined wakeup
+condition is the only wakeup reason and there is no outstanding events;
+thus, it does not wakeup the userspace unnecessary or unnecessary
+devices and keeps suspended as long as possible (saving the power).
+
+Enabling a system to wakeup after a specified time can be easily
+achieved by using RTC. However, to enter suspend again immediately
+without invoking userland and unrelated devices, we need additional
+features in the suspend framework.
+
+Such need comes from:
+
+ 1. Monitoring a critical device status without interrupts that can
+wakeup the system. (in-suspend polling)
+ An example is ambient temperature monitoring that needs to shut down
+the system or a specific device function if it is too hot or cold. The
+temperature of a specific device may be needed to be monitored as well;
+e.g., a charger monitors battery temperature in order to stop charging
+if overheated.
+
+ 2. Execute critical "delayed work" at suspend.
+ A driver or a system/board may have a delayed work (or any similar
+things) that it wants to execute at the requested time.
+ For example, some chargers want to check the battery voltage some
+time (e.g., 30 seconds) after the battery is fully charged and the
+charger has stopped. Then, the charger restarts charging if the voltage
+has dropped more than a threshold, which is smaller than "restart-charger"
+voltage, which is a threshold to restart charging regardless of the
+time passed.
+
+This patch allows to add "suspend_again" callback at struct
+platform_suspend_ops and let the "suspend_again" callback return true if
+the system is required to enter suspend again after the current instance
+of wakeup. Device-wise suspend_again implemented at dev_pm_ops or
+syscore is not done because: a) suspend_again feature is usually under
+platform-wise decision and controls the behavior of the whole platform
+and b) There are very limited devices related to the usage cases of
+suspend_again; chargers and temperature sensors are mentioned so far.
+
+With suspend_again callback registered at struct platform_suspend_ops
+suspend_ops in kernel/power/suspend.c with suspend_set_ops by the
+platform, the suspend framework tries to enter suspend again by
+looping suspend_enter() if suspend_again has returned true and there has
+been no errors in the suspending sequence or pending wakeups (by
+pm_wakeup_pending).
+
+Tested at Exynos4-NURI.
+
+[rjw: Fixed up kerneldoc comment for suspend_enter().]
+
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 3b5fe85252326217cd96f24a7bda4460d8f71bee)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/suspend.h |    8 ++++++++
+ kernel/power/suspend.c  |   18 ++++++++++++------
+ 2 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index 083ffea..e1e3742 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -92,6 +92,13 @@ typedef int __bitwise suspend_state_t;
+  *    @enter() and @wake(), even if any of them fails.  It is executed after
+  *    a failing @prepare.
+  *
++ * @suspend_again: Returns whether the system should suspend again (true) or
++ *    not (false). If the platform wants to poll sensors or execute some
++ *    code during suspended without invoking userspace and most of devices,
++ *    suspend_again callback is the place assuming that periodic-wakeup or
++ *    alarm-wakeup is already setup. This allows to execute some codes while
++ *    being kept suspended in the view of userland and devices.
++ *
+  * @end: Called by the PM core right after resuming devices, to indicate to
+  *    the platform that the system has returned to the working state or
+  *    the transition to the sleep state has been aborted.
+@@ -113,6 +120,7 @@ struct platform_suspend_ops {
+       int (*enter)(suspend_state_t state);
+       void (*wake)(void);
+       void (*finish)(void);
++      bool (*suspend_again)(void);
+       void (*end)(void);
+       void (*recover)(void);
+ };
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 449ccc9..bb98404 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -126,12 +126,13 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
+ }
+ /**
+- *    suspend_enter - enter the desired system sleep state.
+- *    @state:         state to enter
++ * suspend_enter - enter the desired system sleep state.
++ * @state: State to enter
++ * @wakeup: Returns information that suspend should not be entered again.
+  *
+- *    This function should be called after devices have been suspended.
++ * This function should be called after devices have been suspended.
+  */
+-static int suspend_enter(suspend_state_t state)
++static int suspend_enter(suspend_state_t state, bool *wakeup)
+ {
+       int error;
+@@ -165,7 +166,8 @@ static int suspend_enter(suspend_state_t state)
+       error = syscore_suspend();
+       if (!error) {
+-              if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
++              *wakeup = pm_wakeup_pending();
++              if (!(suspend_test(TEST_CORE) || *wakeup)) {
+                       error = suspend_ops->enter(state);
+                       events_check_enabled = false;
+               }
+@@ -199,6 +201,7 @@ static int suspend_enter(suspend_state_t state)
+ int suspend_devices_and_enter(suspend_state_t state)
+ {
+       int error;
++      bool wakeup = false;
+       if (!suspend_ops)
+               return -ENOSYS;
+@@ -220,7 +223,10 @@ int suspend_devices_and_enter(suspend_state_t state)
+       if (suspend_test(TEST_DEVICES))
+               goto Recover_platform;
+-      error = suspend_enter(state);
++      do {
++              error = suspend_enter(state, &wakeup);
++      } while (!error && !wakeup
++              && suspend_ops->suspend_again && suspend_ops->suspend_again());
+  Resume_devices:
+       suspend_test_start();
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0033-PM-Suspend-Export-suspend_set_ops-suspend_valid_only.patch b/patches.runtime_pm/0033-PM-Suspend-Export-suspend_set_ops-suspend_valid_only.patch
new file mode 100644 (file)
index 0000000..1a4938d
--- /dev/null
@@ -0,0 +1,43 @@
+From b12f64fa8677140c0b96d230c1166468ec7c9520 Mon Sep 17 00:00:00 2001
+From: Kevin Hilman <khilman@ti.com>
+Date: Mon, 27 Jun 2011 01:01:07 +0200
+Subject: PM / Suspend: Export suspend_set_ops, suspend_valid_only_mem
+
+Some platforms wish to implement their PM core suspend code as
+modules.  To do so, these functions need to be exported to modules.
+
+[rjw: Replaced EXPORT_SYMBOL with EXPORT_SYMBOL_GPL]
+
+Reported-by: Jean Pihet <j-pihet@ti.com>
+Signed-off-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit a5e4fd8783a2bec861ecf1138cdc042269ff59aa)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/suspend.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index bb98404..d3caa76 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -44,6 +44,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
+       suspend_ops = ops;
+       mutex_unlock(&pm_mutex);
+ }
++EXPORT_SYMBOL_GPL(suspend_set_ops);
+ bool valid_state(suspend_state_t state)
+ {
+@@ -65,6 +66,7 @@ int suspend_valid_only_mem(suspend_state_t state)
+ {
+       return state == PM_SUSPEND_MEM;
+ }
++EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
+ static int suspend_test(int level)
+ {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0034-PM-Add-RTC-to-PM-trace-time-stamps-to-avoid-confusio.patch b/patches.runtime_pm/0034-PM-Add-RTC-to-PM-trace-time-stamps-to-avoid-confusio.patch
new file mode 100644 (file)
index 0000000..9649379
--- /dev/null
@@ -0,0 +1,35 @@
+From f71046d97ad0c115952173bf02c87c2be6b1d40b Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 27 Jun 2011 01:01:16 +0200
+Subject: PM: Add "RTC" to PM trace time stamps to avoid confusion
+
+Some users are apparently confused by dmesg output from
+read_magic_time(), which looks like "real" time and date.
+Add the "RTC" string to time stamps printed by read_magic_time() to
+avoid that confusion.
+
+Reported-by: Justin P. Mattock <justinmattock@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 1d8047a6f7973470bb1de4606a6e00c0bbee3cc6)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/trace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index c80e138..af10abe 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
+       unsigned int val;
+       get_rtc_time(&time);
+-      pr_info("Time: %2d:%02d:%02d  Date: %02d/%02d/%02d\n",
++      pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
+               time.tm_hour, time.tm_min, time.tm_sec,
+               time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
+       val = time.tm_year;                             /* 100 years */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0035-PM-Improve-error-code-of-pm_notifier_call_chain.patch b/patches.runtime_pm/0035-PM-Improve-error-code-of-pm_notifier_call_chain.patch
new file mode 100644 (file)
index 0000000..dc7cb69
--- /dev/null
@@ -0,0 +1,100 @@
+From 40b4c11aefa0df4e8a47bb5cf4fa640a08c0f736 Mon Sep 17 00:00:00 2001
+From: Akinobu Mita <akinobu.mita@gmail.com>
+Date: Fri, 8 Jul 2011 20:53:36 +0200
+Subject: PM: Improve error code of pm_notifier_call_chain()
+
+This enables pm_notifier_call_chain() to get the actual error code
+in the callback rather than always assume -EINVAL by converting all
+PM notifier calls to return encapsulate error code with
+notifier_from_errno().
+
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit f0c077a8b7f9dce590c760a7b2f3c417dffa52d1)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/char/apm-emulation.c   |    2 +-
+ drivers/s390/char/vmwatchdog.c |    4 ++--
+ drivers/s390/cio/css.c         |    8 ++++----
+ kernel/power/main.c            |    5 +++--
+ 4 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
+index 548708c..a7346ab 100644
+--- a/drivers/char/apm-emulation.c
++++ b/drivers/char/apm-emulation.c
+@@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
+                       return NOTIFY_OK;
+               /* interrupted by signal */
+-              return NOTIFY_BAD;
++              return notifier_from_errno(err);
+       case PM_POST_SUSPEND:
+               /*
+diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
+index 12ef912..11312f4 100644
+--- a/drivers/s390/char/vmwatchdog.c
++++ b/drivers/s390/char/vmwatchdog.c
+@@ -258,13 +258,13 @@ static int vmwdt_suspend(void)
+       if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
+               pr_err("The system cannot be suspended while the watchdog"
+                       " is in use\n");
+-              return NOTIFY_BAD;
++              return notifier_from_errno(-EBUSY);
+       }
+       if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
+               clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+               pr_err("The system cannot be suspended while the watchdog"
+                       " is running\n");
+-              return NOTIFY_BAD;
++              return notifier_from_errno(-EBUSY);
+       }
+       return NOTIFY_DONE;
+ }
+diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
+index c47b25f..92d7324 100644
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
+                               mutex_unlock(&css->mutex);
+                               continue;
+                       }
+-                      if (__chsc_do_secm(css, 0))
+-                              ret = NOTIFY_BAD;
++                      ret = __chsc_do_secm(css, 0);
++                      ret = notifier_from_errno(ret);
+                       mutex_unlock(&css->mutex);
+               }
+               break;
+@@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
+                               mutex_unlock(&css->mutex);
+                               continue;
+                       }
+-                      if (__chsc_do_secm(css, 1))
+-                              ret = NOTIFY_BAD;
++                      ret = __chsc_do_secm(css, 1);
++                      ret = notifier_from_errno(ret);
+                       mutex_unlock(&css->mutex);
+               }
+               /* search for subchannels, which appeared during hibernation */
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 2981af4..6c601f8 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -37,8 +37,9 @@ EXPORT_SYMBOL_GPL(unregister_pm_notifier);
+ int pm_notifier_call_chain(unsigned long val)
+ {
+-      return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+-                      == NOTIFY_BAD) ? -EINVAL : 0;
++      int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
++
++      return notifier_to_errno(ret);
+ }
+ /* If set, devices may be suspended and resumed asynchronously. */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0036-drivers-base-power-opp.c-fix-dev_opp-initial-value.patch b/patches.runtime_pm/0036-drivers-base-power-opp.c-fix-dev_opp-initial-value.patch
new file mode 100644 (file)
index 0000000..4674bb6
--- /dev/null
@@ -0,0 +1,36 @@
+From 228f47a156a24f7d1b39930f0a677eb91fabb9da Mon Sep 17 00:00:00 2001
+From: Jonghwan Choi <jhbird.choi@samsung.com>
+Date: Tue, 26 Jul 2011 16:08:16 -0700
+Subject: drivers/base/power/opp.c: fix dev_opp initial value
+
+Dev_opp initial value shoule be ERR_PTR(), IS_ERR() is used to check
+error.
+
+Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
+Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
+Cc: Greg KH <greg@kroah.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit fc92805a8e25e5e2b0ba7c413cc15d9f05962ee8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/opp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
+index 5cc1232..b23de18 100644
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -453,7 +453,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+ static int opp_set_availability(struct device *dev, unsigned long freq,
+               bool availability_req)
+ {
+-      struct device_opp *tmp_dev_opp, *dev_opp = NULL;
++      struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+       struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       int r = 0;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0037-PM-Domains-Fix-pm_genpd_poweron.patch b/patches.runtime_pm/0037-PM-Domains-Fix-pm_genpd_poweron.patch
new file mode 100644 (file)
index 0000000..5dd4d1c
--- /dev/null
@@ -0,0 +1,45 @@
+From 83396d4b1611eebbb93949b08fe0d4c36d5f4982 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 5 Aug 2011 21:45:11 +0200
+Subject: PM / Domains: Fix pm_genpd_poweron()
+
+The local variable ret is defined twice in pm_genpd_poweron(), which
+causes this function to always return 0, even if the PM domain's
+.power_on() callback fails, in which case an error code should be
+returned.
+
+Remove the wrong second definition of ret and additionally remove an
+unnecessary definition of wait from pm_genpd_poweron().
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit fe202fde50a986a8510c62a76dc8733c1a8fac86)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index be8714a..e18566a 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -80,7 +80,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
+ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
+       struct generic_pm_domain *parent = genpd->parent;
+-      DEFINE_WAIT(wait);
+       int ret = 0;
+  start:
+@@ -112,7 +111,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+       }
+       if (genpd->power_on) {
+-              int ret = genpd->power_on(genpd);
++              ret = genpd->power_on(genpd);
+               if (ret)
+                       goto out;
+       }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0038-PM-Runtime-Allow-_put_sync-from-interrupts-disabled-.patch b/patches.runtime_pm/0038-PM-Runtime-Allow-_put_sync-from-interrupts-disabled-.patch
new file mode 100644 (file)
index 0000000..6ad1083
--- /dev/null
@@ -0,0 +1,90 @@
+From fe65f70e3ddea03d11ba56bed454621f2aad9961 Mon Sep 17 00:00:00 2001
+From: Kevin Hilman <khilman@ti.com>
+Date: Fri, 5 Aug 2011 21:45:20 +0200
+Subject: PM / Runtime: Allow _put_sync() from interrupts-disabled context
+
+Currently the use of pm_runtime_put_sync() is not safe from
+interrupts-disabled context because rpm_idle() will release the
+spinlock and enable interrupts for the idle callbacks.  This enables
+interrupts during a time where interrupts were expected to be
+disabled, and can have strange side effects on drivers that expected
+interrupts to be disabled.
+
+This is not a bug since the documentation clearly states that only
+_put_sync_suspend() is safe in IRQ-safe mode.
+
+However, pm_runtime_put_sync() could be made safe when in IRQ-safe
+mode by releasing the spinlock but not re-enabling interrupts, which
+is what this patch aims to do.
+
+Problem was found when using some buggy drivers that set
+pm_runtime_irq_safe() and used _put_sync() in interrupts-disabled
+context.
+
+Reported-by: Colin Cross <ccross@google.com>
+Tested-by: Nishanth Menon <nm@ti.com>
+Signed-off-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 02b26774afebb2d62695ba3230319d70d8c6cc2d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   10 +++++-----
+ drivers/base/power/runtime.c       |   10 ++++++++--
+ 2 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 14dd3c6..4ce5450 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -54,11 +54,10 @@ referred to as subsystem-level callbacks in what follows.
+ By default, the callbacks are always invoked in process context with interrupts
+ enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
+ to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
+-callbacks should be invoked in atomic context with interrupts disabled
+-(->runtime_idle() is still invoked the default way).  This implies that these
+-callback routines must not block or sleep, but it also means that the
+-synchronous helper functions listed at the end of Section 4 can be used within
+-an interrupt handler or in an atomic context.
++callbacks should be invoked in atomic context with interrupts disabled.
++This implies that these callback routines must not block or sleep, but it also
++means that the synchronous helper functions listed at the end of Section 4 can
++be used within an interrupt handler or in an atomic context.
+ The subsystem-level suspend callback is _entirely_ _responsible_ for handling
+ the suspend of the device as appropriate, which may, but need not include
+@@ -483,6 +482,7 @@ pm_runtime_suspend()
+ pm_runtime_autosuspend()
+ pm_runtime_resume()
+ pm_runtime_get_sync()
++pm_runtime_put_sync()
+ pm_runtime_put_sync_suspend()
+ 5. Runtime PM Initialization, Device Probing and Removal
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8dc247c..acb3f83 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -226,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
+               callback = NULL;
+       if (callback) {
+-              spin_unlock_irq(&dev->power.lock);
++              if (dev->power.irq_safe)
++                      spin_unlock(&dev->power.lock);
++              else
++                      spin_unlock_irq(&dev->power.lock);
+               callback(dev);
+-              spin_lock_irq(&dev->power.lock);
++              if (dev->power.irq_safe)
++                      spin_lock(&dev->power.lock);
++              else
++                      spin_lock_irq(&dev->power.lock);
+       }
+       dev->power.idle_notification = false;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0039-PM-Domains-Fix-build-for-CONFIG_PM_RUNTIME-unset.patch b/patches.runtime_pm/0039-PM-Domains-Fix-build-for-CONFIG_PM_RUNTIME-unset.patch
new file mode 100644 (file)
index 0000000..4e39b52
--- /dev/null
@@ -0,0 +1,109 @@
+From ac9b6c62b9958c273e8c6f5ceef418287846b5b7 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 14 Aug 2011 13:34:31 +0200
+Subject: PM / Domains: Fix build for CONFIG_PM_RUNTIME unset
+
+Function genpd_queue_power_off_work() is not defined for
+CONFIG_PM_RUNTIME, so pm_genpd_poweroff_unused() causes a build
+error to happen in that case.  Fix the problem by making
+pm_genpd_poweroff_unused() depend on CONFIG_PM_RUNTIME too.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 17f2ae7f677f023997e02fd2ebabd90ea2a0390d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   30 +++++++++++++++---------------
+ include/linux/pm_domain.h   |   10 +++++++---
+ kernel/power/Kconfig        |    4 ++++
+ 3 files changed, 26 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index e18566a..1c37457 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -460,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       return 0;
+ }
++/**
++ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
++ */
++void pm_genpd_poweroff_unused(void)
++{
++      struct generic_pm_domain *genpd;
++
++      mutex_lock(&gpd_list_lock);
++
++      list_for_each_entry(genpd, &gpd_list, gpd_list_node)
++              genpd_queue_power_off_work(genpd);
++
++      mutex_unlock(&gpd_list_lock);
++}
++
+ #else
+ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+@@ -1255,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       list_add(&genpd->gpd_list_node, &gpd_list);
+       mutex_unlock(&gpd_list_lock);
+ }
+-
+-/**
+- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+- */
+-void pm_genpd_poweroff_unused(void)
+-{
+-      struct generic_pm_domain *genpd;
+-
+-      mutex_lock(&gpd_list_lock);
+-
+-      list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+-              genpd_queue_power_off_work(genpd);
+-
+-      mutex_unlock(&gpd_list_lock);
+-}
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 21097cb..f9ec173 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -72,8 +72,6 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ extern void pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
+ extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+-extern void pm_genpd_poweroff_unused(void);
+-extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
+ #else
+ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+@@ -101,8 +99,14 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
+       return -ENOSYS;
+ }
+-static inline void pm_genpd_poweroff_unused(void) {}
++#endif
++
++#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
++extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
++extern void pm_genpd_poweroff_unused(void);
++#else
+ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
++static inline void pm_genpd_poweroff_unused(void) {}
+ #endif
+ #endif /* _LINUX_PM_DOMAIN_H */
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index b1914cb9..3744c59 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -231,3 +231,7 @@ config PM_CLK
+ config PM_GENERIC_DOMAINS
+       bool
+       depends on PM
++
++config PM_GENERIC_DOMAINS_RUNTIME
++      def_bool y
++      depends on PM_RUNTIME && PM_GENERIC_DOMAINS
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0040-PM-Runtime-Add-might_sleep-to-runtime-PM-functions.patch b/patches.runtime_pm/0040-PM-Runtime-Add-might_sleep-to-runtime-PM-functions.patch
new file mode 100644 (file)
index 0000000..a8ca41b
--- /dev/null
@@ -0,0 +1,106 @@
+From 328eba5e3cefcd17ddcd4b700be7e92909e8320a Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Mon, 8 Aug 2011 23:39:36 +0200
+Subject: PM / Runtime: Add might_sleep() to runtime PM functions
+
+Some of the entry points to pm runtime are not safe to
+call in atomic context unless pm_runtime_irq_safe() has
+been called.  Inspecting the code, it is not immediately
+obvious that the functions sleep at all, as they run
+inside a spin_lock_irqsave, but under some conditions
+they can drop the lock and turn on irqs.
+
+If a driver incorrectly calls the pm_runtime apis, it can
+cause sleeping and irq processing when it expects to stay
+in atomic context.
+
+Add might_sleep_if to the majority of the __pm_runtime_* entry points
+to enforce correct usage.
+
+Add pm_runtime_put_sync_autosuspend to the list of
+functions that can be called in atomic context.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 311aab73d273eb22be976055f6cab224f7279d5e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |    1 +
+ drivers/base/power/runtime.c       |   15 ++++++++++++---
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 4ce5450..62f37bc 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -484,6 +484,7 @@ pm_runtime_resume()
+ pm_runtime_get_sync()
+ pm_runtime_put_sync()
+ pm_runtime_put_sync_suspend()
++pm_runtime_put_sync_autosuspend()
+ 5. Runtime PM Initialization, Device Probing and Removal
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index acb3f83..04e18ab 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -732,13 +732,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
+  * return immediately if it is larger than zero.  Then carry out an idle
+  * notification, either synchronous or asynchronous.
+  *
+- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
++ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
++ * or if pm_runtime_irq_safe() has been called.
+  */
+ int __pm_runtime_idle(struct device *dev, int rpmflags)
+ {
+       unsigned long flags;
+       int retval;
++      might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
++
+       if (rpmflags & RPM_GET_PUT) {
+               if (!atomic_dec_and_test(&dev->power.usage_count))
+                       return 0;
+@@ -761,13 +764,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+  * return immediately if it is larger than zero.  Then carry out a suspend,
+  * either synchronous or asynchronous.
+  *
+- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
++ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
++ * or if pm_runtime_irq_safe() has been called.
+  */
+ int __pm_runtime_suspend(struct device *dev, int rpmflags)
+ {
+       unsigned long flags;
+       int retval;
++      might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
++
+       if (rpmflags & RPM_GET_PUT) {
+               if (!atomic_dec_and_test(&dev->power.usage_count))
+                       return 0;
+@@ -789,13 +795,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
+  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
+  * carry out a resume, either synchronous or asynchronous.
+  *
+- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
++ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
++ * or if pm_runtime_irq_safe() has been called.
+  */
+ int __pm_runtime_resume(struct device *dev, int rpmflags)
+ {
+       unsigned long flags;
+       int retval;
++      might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
++
+       if (rpmflags & RPM_GET_PUT)
+               atomic_inc(&dev->power.usage_count);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0041-PM-Runtime-Add-macro-to-test-for-runtime-PM-events.patch b/patches.runtime_pm/0041-PM-Runtime-Add-macro-to-test-for-runtime-PM-events.patch
new file mode 100644 (file)
index 0000000..fc1bad7
--- /dev/null
@@ -0,0 +1,356 @@
+From 8ddf7a667b20addc334227054a0147cf0b882938 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Fri, 19 Aug 2011 23:49:48 +0200
+Subject: PM / Runtime: Add macro to test for runtime PM events
+
+This patch (as1482) adds a macro for testing whether or not a
+pm_message value represents an autosuspend or autoresume (i.e., a
+runtime PM) event.  Encapsulating this notion seems preferable to
+open-coding the test all over the place.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 5b1b0b812a7b1a5b968c5d06d90d1cb88621b941)
+
+Conflicts:
+
+       drivers/usb/class/cdc-wdm.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/usb/power-management.txt |    8 ++++----
+ drivers/bluetooth/btusb.c              |    2 +-
+ drivers/hid/hid-picolcd.c              |    2 +-
+ drivers/hid/usbhid/hid-core.c          |    7 +++----
+ drivers/net/usb/usbnet.c               |    2 +-
+ drivers/net/wimax/i2400m/usb.c         |    4 ++--
+ drivers/usb/class/cdc-acm.c            |    2 +-
+ drivers/usb/class/cdc-wdm.c            |    6 +++---
+ drivers/usb/core/driver.c              |    9 ++++-----
+ drivers/usb/core/hcd.c                 |    4 ++--
+ drivers/usb/core/hub.c                 |   10 +++++-----
+ drivers/usb/serial/sierra.c            |    2 +-
+ drivers/usb/serial/usb_wwan.c          |    2 +-
+ include/linux/pm.h                     |    2 ++
+ sound/usb/card.c                       |    2 +-
+ 15 files changed, 32 insertions(+), 32 deletions(-)
+
+diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
+index c9ffa9c..e8662a5 100644
+--- a/Documentation/usb/power-management.txt
++++ b/Documentation/usb/power-management.txt
+@@ -439,10 +439,10 @@ cause autosuspends to fail with -EBUSY if the driver needs to use the
+ device.
+ External suspend calls should never be allowed to fail in this way,
+-only autosuspend calls.  The driver can tell them apart by checking
+-the PM_EVENT_AUTO bit in the message.event argument to the suspend
+-method; this bit will be set for internal PM events (autosuspend) and
+-clear for external PM events.
++only autosuspend calls.  The driver can tell them apart by applying
++the PMSG_IS_AUTO() macro to the message argument to the suspend
++method; it will return True for internal PM events (autosuspend) and
++False for external PM events.
+       Mutual exclusion
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 9f3a8b3..0a06b97 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1109,7 +1109,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
+               return 0;
+       spin_lock_irq(&data->txlock);
+-      if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
++      if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
+               set_bit(BTUSB_SUSPENDING, &data->flags);
+               spin_unlock_irq(&data->txlock);
+       } else {
+diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
+index 9d8710f..1782693 100644
+--- a/drivers/hid/hid-picolcd.c
++++ b/drivers/hid/hid-picolcd.c
+@@ -2409,7 +2409,7 @@ static int picolcd_raw_event(struct hid_device *hdev,
+ #ifdef CONFIG_PM
+ static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
+ {
+-      if (message.event & PM_EVENT_AUTO)
++      if (PMSG_IS_AUTO(message))
+               return 0;
+       picolcd_suspend_backlight(hid_get_drvdata(hdev));
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 38c261a..17fdfe6 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -1330,7 +1330,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
+       struct usbhid_device *usbhid = hid->driver_data;
+       int status;
+-      if (message.event & PM_EVENT_AUTO) {
++      if (PMSG_IS_AUTO(message)) {
+               spin_lock_irq(&usbhid->lock);   /* Sync with error handler */
+               if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
+                   && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
+@@ -1365,7 +1365,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
+                       return -EIO;
+       }
+-      if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
++      if (!ignoreled && PMSG_IS_AUTO(message)) {
+               spin_lock_irq(&usbhid->lock);
+               if (test_bit(HID_LED_ON, &usbhid->iofl)) {
+                       spin_unlock_irq(&usbhid->lock);
+@@ -1378,8 +1378,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
+       hid_cancel_delayed_stuff(usbhid);
+       hid_cease_io(usbhid);
+-      if ((message.event & PM_EVENT_AUTO) &&
+-                      test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
++      if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+               /* lost race against keypresses */
+               status = hid_start_in(hid);
+               if (status < 0)
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 46a6b67..caadbf2 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1480,7 +1480,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+       if (!dev->suspend_count++) {
+               spin_lock_irq(&dev->txq.lock);
+               /* don't autosuspend while transmitting */
+-              if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
++              if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+                       spin_unlock_irq(&dev->txq.lock);
+                       return -EBUSY;
+               } else {
+diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
+index 298f2b0..9a644d0 100644
+--- a/drivers/net/wimax/i2400m/usb.c
++++ b/drivers/net/wimax/i2400m/usb.c
+@@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
+  *
+  *    As well, the device might refuse going to sleep for whichever
+  *    reason. In this case we just fail. For system suspend/hibernate,
+- *    we *can't* fail. We check PM_EVENT_AUTO to see if the
++ *    we *can't* fail. We check PMSG_IS_AUTO to see if the
+  *    suspend call comes from the USB stack or from the system and act
+  *    in consequence.
+  *
+@@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
+       struct i2400m *i2400m = &i2400mu->i2400m;
+ #ifdef CONFIG_PM
+-      if (pm_msg.event & PM_EVENT_AUTO)
++      if (PMSG_IS_AUTO(pm_msg))
+               is_autosuspend = 1;
+ #endif
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 158f631..4748313 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1319,7 +1319,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+       struct acm *acm = usb_get_intfdata(intf);
+       int cnt;
+-      if (message.event & PM_EVENT_AUTO) {
++      if (PMSG_IS_AUTO(message)) {
+               int b;
+               spin_lock_irq(&acm->write_lock);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 76f0613..e42fae7 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -814,13 +814,13 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+       dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
+       /* if this is an autosuspend the caller does the locking */
+-      if (!(message.event & PM_EVENT_AUTO)) {
++      if (!PMSG_IS_AUTO(message)) {
+               mutex_lock(&desc->rlock);
+               mutex_lock(&desc->wlock);
+       }
+       spin_lock_irq(&desc->iuspin);
+-      if ((message.event & PM_EVENT_AUTO) &&
++      if (PMSG_IS_AUTO(message) &&
+                       (test_bit(WDM_IN_USE, &desc->flags)
+                       || test_bit(WDM_RESPONDING, &desc->flags))) {
+               spin_unlock_irq(&desc->iuspin);
+@@ -833,7 +833,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+               kill_urbs(desc);
+               cancel_work_sync(&desc->rxwork);
+       }
+-      if (!(message.event & PM_EVENT_AUTO)) {
++      if (!PMSG_IS_AUTO(message)) {
+               mutex_unlock(&desc->wlock);
+               mutex_unlock(&desc->rlock);
+       }
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 75b4bc0..3ffdb0e 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1046,8 +1046,7 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
+       /* Non-root devices on a full/low-speed bus must wait for their
+        * companion high-speed root hub, in case a handoff is needed.
+        */
+-      if (!(msg.event & PM_EVENT_AUTO) && udev->parent &&
+-                      udev->bus->hs_companion)
++      if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion)
+               device_pm_wait_for_dev(&udev->dev,
+                               &udev->bus->hs_companion->root_hub->dev);
+@@ -1075,7 +1074,7 @@ static int usb_suspend_interface(struct usb_device *udev,
+       if (driver->suspend) {
+               status = driver->suspend(intf, msg);
+-              if (status && !(msg.event & PM_EVENT_AUTO))
++              if (status && !PMSG_IS_AUTO(msg))
+                       dev_err(&intf->dev, "%s error %d\n",
+                                       "suspend", status);
+       } else {
+@@ -1189,7 +1188,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
+                       status = usb_suspend_interface(udev, intf, msg);
+                       /* Ignore errors during system sleep transitions */
+-                      if (!(msg.event & PM_EVENT_AUTO))
++                      if (!PMSG_IS_AUTO(msg))
+                               status = 0;
+                       if (status != 0)
+                               break;
+@@ -1199,7 +1198,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
+               status = usb_suspend_device(udev, msg);
+               /* Again, ignore errors during system sleep transitions */
+-              if (!(msg.event & PM_EVENT_AUTO))
++              if (!PMSG_IS_AUTO(msg))
+                       status = 0;
+       }
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 45e0908..c284e97 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1950,7 +1950,7 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
+       int             old_state = hcd->state;
+       dev_dbg(&rhdev->dev, "bus %s%s\n",
+-                      (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend");
++                      (PMSG_IS_AUTO(msg) ? "auto-" : ""), "suspend");
+       if (HCD_DEAD(hcd)) {
+               dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend");
+               return 0;
+@@ -1986,7 +1986,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
+       int             old_state = hcd->state;
+       dev_dbg(&rhdev->dev, "usb %s%s\n",
+-                      (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
++                      (PMSG_IS_AUTO(msg) ? "auto-" : ""), "resume");
+       if (HCD_DEAD(hcd)) {
+               dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume");
+               return 0;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 3776ddf..8c390a4 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2352,7 +2352,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+                       dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
+                                       status);
+                       /* bail if autosuspend is requested */
+-                      if (msg.event & PM_EVENT_AUTO)
++                      if (PMSG_IS_AUTO(msg))
+                               return status;
+               }
+       }
+@@ -2377,12 +2377,12 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+                               USB_CTRL_SET_TIMEOUT);
+               /* System sleep transitions should never fail */
+-              if (!(msg.event & PM_EVENT_AUTO))
++              if (!PMSG_IS_AUTO(msg))
+                       status = 0;
+       } else {
+               /* device has up to 10 msec to fully suspend */
+               dev_dbg(&udev->dev, "usb %ssuspend\n",
+-                              (msg.event & PM_EVENT_AUTO ? "auto-" : ""));
++                              (PMSG_IS_AUTO(msg) ? "auto-" : ""));
+               usb_set_device_state(udev, USB_STATE_SUSPENDED);
+               msleep(10);
+       }
+@@ -2533,7 +2533,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+       } else {
+               /* drive resume for at least 20 msec */
+               dev_dbg(&udev->dev, "usb %sresume\n",
+-                              (msg.event & PM_EVENT_AUTO ? "auto-" : ""));
++                              (PMSG_IS_AUTO(msg) ? "auto-" : ""));
+               msleep(25);
+               /* Virtual root hubs can trigger on GET_PORT_STATUS to
+@@ -2635,7 +2635,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
+               udev = hdev->children [port1-1];
+               if (udev && udev->can_submit) {
+                       dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
+-                      if (msg.event & PM_EVENT_AUTO)
++                      if (PMSG_IS_AUTO(msg))
+                               return -EBUSY;
+               }
+       }
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index d5d136a..b18179b 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -1009,7 +1009,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
+       struct sierra_intf_private *intfdata;
+       int b;
+-      if (message.event & PM_EVENT_AUTO) {
++      if (PMSG_IS_AUTO(message)) {
+               intfdata = serial->private;
+               spin_lock_irq(&intfdata->susp_lock);
+               b = intfdata->in_flight;
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index e4fad5e..d555ca9 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -651,7 +651,7 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
+       dbg("%s entered", __func__);
+-      if (message.event & PM_EVENT_AUTO) {
++      if (PMSG_IS_AUTO(message)) {
+               spin_lock_irq(&intfdata->susp_lock);
+               b = intfdata->in_flight;
+               spin_unlock_irq(&intfdata->susp_lock);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index f7c84c9..18de9f8 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -366,6 +366,8 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
+ #define PMSG_AUTO_RESUME      ((struct pm_message) \
+                                       { .event = PM_EVENT_AUTO_RESUME, })
++#define PMSG_IS_AUTO(msg)     (((msg).event & PM_EVENT_AUTO) != 0)
++
+ /**
+  * Device run-time power management status.
+  *
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 57a8e2d..e782fb5 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -631,7 +631,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+       if (chip == (void *)-1L)
+               return 0;
+-      if (!(message.event & PM_EVENT_AUTO)) {
++      if (!PMSG_IS_AUTO(message)) {
+               snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
+               if (!chip->num_suspended_intf++) {
+                       list_for_each(p, &chip->pcm_list) {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0042-PM-Use-spinlock-instead-of-mutex-in-clock-management.patch b/patches.runtime_pm/0042-PM-Use-spinlock-instead-of-mutex-in-clock-management.patch
new file mode 100644 (file)
index 0000000..59ba9e0
--- /dev/null
@@ -0,0 +1,197 @@
+From 778ffe3bbf743f14d2d43e28f38f8f38a3e0106f Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 24 Aug 2011 21:40:56 +0200
+Subject: PM: Use spinlock instead of mutex in clock management functions
+
+The lock member of struct pm_clk_data is of type struct mutex,
+which is a problem, because the suspend and resume routines
+defined in drivers/base/power/clock_ops.c cannot be executed
+with interrupts disabled for this reason.  Modify
+struct pm_clk_data so that its lock member is a spinlock.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit b7ab83edba2d50583bc9520431618489379718b2)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/clock_ops.c |   40 ++++++++++++++++++++++------------------
+ 1 file changed, 22 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index a846b2f..2c18d58 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -19,7 +19,7 @@
+ struct pm_clk_data {
+       struct list_head clock_list;
+-      struct mutex lock;
++      spinlock_t lock;
+ };
+ enum pce_status {
+@@ -73,9 +73,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
+               }
+       }
+-      mutex_lock(&pcd->lock);
++      spin_lock_irq(&pcd->lock);
+       list_add_tail(&ce->node, &pcd->clock_list);
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irq(&pcd->lock);
+       return 0;
+ }
+@@ -83,8 +83,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
+  * __pm_clk_remove - Destroy PM clock entry.
+  * @ce: PM clock entry to destroy.
+  *
+- * This routine must be called under the mutex protecting the PM list of clocks
+- * corresponding the the @ce's device.
++ * This routine must be called under the spinlock protecting the PM list of
++ * clocks corresponding the the @ce's device.
+  */
+ static void __pm_clk_remove(struct pm_clock_entry *ce)
+ {
+@@ -123,7 +123,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
+       if (!pcd)
+               return;
+-      mutex_lock(&pcd->lock);
++      spin_lock_irq(&pcd->lock);
+       list_for_each_entry(ce, &pcd->clock_list, node) {
+               if (!con_id && !ce->con_id) {
+@@ -137,7 +137,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
+               }
+       }
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irq(&pcd->lock);
+ }
+ /**
+@@ -158,7 +158,7 @@ int pm_clk_init(struct device *dev)
+       }
+       INIT_LIST_HEAD(&pcd->clock_list);
+-      mutex_init(&pcd->lock);
++      spin_lock_init(&pcd->lock);
+       dev->power.subsys_data = pcd;
+       return 0;
+ }
+@@ -181,12 +181,12 @@ void pm_clk_destroy(struct device *dev)
+       dev->power.subsys_data = NULL;
+-      mutex_lock(&pcd->lock);
++      spin_lock_irq(&pcd->lock);
+       list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+               __pm_clk_remove(ce);
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irq(&pcd->lock);
+       kfree(pcd);
+ }
+@@ -220,13 +220,14 @@ int pm_clk_suspend(struct device *dev)
+ {
+       struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
++      unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+       if (!pcd)
+               return 0;
+-      mutex_lock(&pcd->lock);
++      spin_lock_irqsave(&pcd->lock, flags);
+       list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+               if (ce->status == PCE_STATUS_NONE)
+@@ -238,7 +239,7 @@ int pm_clk_suspend(struct device *dev)
+               }
+       }
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irqrestore(&pcd->lock, flags);
+       return 0;
+ }
+@@ -251,13 +252,14 @@ int pm_clk_resume(struct device *dev)
+ {
+       struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
++      unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+       if (!pcd)
+               return 0;
+-      mutex_lock(&pcd->lock);
++      spin_lock_irqsave(&pcd->lock, flags);
+       list_for_each_entry(ce, &pcd->clock_list, node) {
+               if (ce->status == PCE_STATUS_NONE)
+@@ -269,7 +271,7 @@ int pm_clk_resume(struct device *dev)
+               }
+       }
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irqrestore(&pcd->lock, flags);
+       return 0;
+ }
+@@ -344,6 +346,7 @@ int pm_clk_suspend(struct device *dev)
+ {
+       struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
++      unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -351,12 +354,12 @@ int pm_clk_suspend(struct device *dev)
+       if (!pcd || !dev->driver)
+               return 0;
+-      mutex_lock(&pcd->lock);
++      spin_lock_irqsave(&pcd->lock, flags);
+       list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+               clk_disable(ce->clk);
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irqrestore(&pcd->lock, flags);
+       return 0;
+ }
+@@ -369,6 +372,7 @@ int pm_clk_resume(struct device *dev)
+ {
+       struct pm_clk_data *pcd = __to_pcd(dev);
+       struct pm_clock_entry *ce;
++      unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -376,12 +380,12 @@ int pm_clk_resume(struct device *dev)
+       if (!pcd || !dev->driver)
+               return 0;
+-      mutex_lock(&pcd->lock);
++      spin_lock_irqsave(&pcd->lock, flags);
+       list_for_each_entry(ce, &pcd->clock_list, node)
+               clk_enable(ce->clk);
+-      mutex_unlock(&pcd->lock);
++      spin_unlock_irqrestore(&pcd->lock, flags);
+       return 0;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0043-PM-Runtime-Correct-documentation-of-pm_runtime_irq_s.patch b/patches.runtime_pm/0043-PM-Runtime-Correct-documentation-of-pm_runtime_irq_s.patch
new file mode 100644 (file)
index 0000000..83c9e89
--- /dev/null
@@ -0,0 +1,35 @@
+From 69f50eb4b26a6c7d2c267c14a73939399db3bfd3 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 25 Aug 2011 15:31:05 +0200
+Subject: PM / Runtime: Correct documentation of pm_runtime_irq_safe()
+
+The description of pm_runtime_irq_safe() has to be updated to follow
+the code after commit 02b2677 (PM / Runtime: Allow _put_sync() from
+interrupts-disabled context).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit 64584eb9cde5f3c5a07f24b2e7cd38f1157be181)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 62f37bc..08d70e4 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -431,8 +431,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+   void pm_runtime_irq_safe(struct device *dev);
+     - set the power.irq_safe flag for the device, causing the runtime-PM
+-      suspend and resume callbacks (but not the idle callback) to be invoked
+-      with interrupts disabled
++      callbacks to be invoked with interrupts off
+   void pm_runtime_mark_last_busy(struct device *dev);
+     - set the power.last_busy field to the current time
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0044-PM-Domains-Implement-subdomain-counters-as-atomic-fi.patch b/patches.runtime_pm/0044-PM-Domains-Implement-subdomain-counters-as-atomic-fi.patch
new file mode 100644 (file)
index 0000000..9bcb426
--- /dev/null
@@ -0,0 +1,128 @@
+From 1a657a8b091e233efe124c46878b85e760895442 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:04 +0200
+Subject: PM / Domains: Implement subdomain counters as atomic fields
+
+Currently, pm_genpd_poweron() and pm_genpd_poweroff() need to take
+the parent PM domain's lock in order to modify the parent's counter
+of active subdomains in a nonracy way.  This causes the locking to be
+considerably complex and in fact is not necessary, because the
+subdomain counters may be implemented as atomic fields and they
+won't have to be modified under a lock.
+
+Replace the unsigned in sd_count field in struct generic_pm_domain
+by an atomic_t one and modify the code in drivers/base/power/domain.c
+to take this change into account.
+
+This patch doesn't change the locking yet, that is going to be done
+in a separate subsequent patch.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c4bb3160c8823d3a1e581d7e05fb8b343097e7c8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   30 ++++++++++++++++++++----------
+ include/linux/pm_domain.h   |    2 +-
+ 2 files changed, 21 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 1c37457..20e2b52 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+       return pd_to_genpd(dev->pm_domain);
+ }
+-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
++static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+ {
+-      if (!WARN_ON(genpd->sd_count == 0))
+-                      genpd->sd_count--;
++      bool ret = false;
++
++      if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
++              ret = !!atomic_dec_and_test(&genpd->sd_count);
++
++      return ret;
++}
++
++static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
++{
++      atomic_inc(&genpd->sd_count);
++      smp_mb__after_atomic_inc();
+ }
+ static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+@@ -118,7 +128,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+       genpd_set_active(genpd);
+       if (parent)
+-              parent->sd_count++;
++              genpd_sd_counter_inc(parent);
+  out:
+       mutex_unlock(&genpd->lock);
+@@ -254,7 +264,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+           || genpd->resume_count > 0)
+               return 0;
+-      if (genpd->sd_count > 0)
++      if (atomic_read(&genpd->sd_count) > 0)
+               return -EBUSY;
+       not_suspended = 0;
+@@ -325,8 +335,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       genpd->status = GPD_STATE_POWER_OFF;
+       if (parent) {
+-              genpd_sd_counter_dec(parent);
+-              if (parent->sd_count == 0)
++              if (genpd_sd_counter_dec(parent))
+                       genpd_queue_power_off_work(parent);
+               genpd_release_lock(parent);
+@@ -506,7 +515,8 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+       if (genpd->status == GPD_STATE_POWER_OFF)
+               return;
+-      if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
++      if (genpd->suspended_count != genpd->device_count
++          || atomic_read(&genpd->sd_count) > 0)
+               return;
+       if (genpd->power_off)
+@@ -1167,7 +1177,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+       list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+       new_subdomain->parent = genpd;
+       if (subdomain->status != GPD_STATE_POWER_OFF)
+-              genpd->sd_count++;
++              genpd_sd_counter_inc(genpd);
+  out:
+       mutex_unlock(&new_subdomain->lock);
+@@ -1242,7 +1252,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->gov = gov;
+       INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+       genpd->in_progress = 0;
+-      genpd->sd_count = 0;
++      atomic_set(&genpd->sd_count, 0);
+       genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+       init_waitqueue_head(&genpd->status_wait_queue);
+       genpd->poweroff_task = NULL;
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index f9ec173..81c5782 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -33,7 +33,7 @@ struct generic_pm_domain {
+       struct dev_power_governor *gov;
+       struct work_struct power_off_work;
+       unsigned int in_progress;       /* Number of devices being suspended now */
+-      unsigned int sd_count;  /* Number of subdomains with power "on" */
++      atomic_t sd_count;      /* Number of subdomains with power "on" */
+       enum gpd_status status; /* Current state of the domain */
+       wait_queue_head_t status_wait_queue;
+       struct task_struct *poweroff_task;      /* Powering off task */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0045-PM-Domains-Do-not-take-parent-locks-to-modify-subdom.patch b/patches.runtime_pm/0045-PM-Domains-Do-not-take-parent-locks-to-modify-subdom.patch
new file mode 100644 (file)
index 0000000..60e0b1e
--- /dev/null
@@ -0,0 +1,150 @@
+From 66926d50341a303bfaa8135cda00ec2fe0fa68a9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:14 +0200
+Subject: PM / Domains: Do not take parent locks to modify subdomain counters
+
+After the subdomain counter in struct generic_pm_domain has been
+changed into an atomic_t field, it is possible to modify
+pm_genpd_poweron() and pm_genpd_poweroff() so that they don't take
+the parents locks.  This requires pm_genpd_poweron() to increment
+the parent's subdomain counter before calling itself recursively
+for the parent and to decrement it if an error is to be returned.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 3c07cbc488bfd1ad1abf64d09cc692339b5f8a83)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   70 +++++++++++++++++++------------------------
+ 1 file changed, 31 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 20e2b52..ef25b6f 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -93,12 +93,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+       int ret = 0;
+  start:
+-      if (parent) {
+-              genpd_acquire_lock(parent);
+-              mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-      } else {
+-              mutex_lock(&genpd->lock);
+-      }
++      mutex_lock(&genpd->lock);
+       if (genpd->status == GPD_STATE_ACTIVE
+           || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+@@ -109,31 +104,33 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+               goto out;
+       }
+-      if (parent && parent->status != GPD_STATE_ACTIVE) {
++      if (parent) {
++              genpd_sd_counter_inc(parent);
++
+               mutex_unlock(&genpd->lock);
+-              genpd_release_lock(parent);
+               ret = pm_genpd_poweron(parent);
+-              if (ret)
++              if (ret) {
++                      genpd_sd_counter_dec(parent);
+                       return ret;
++              }
++              parent = NULL;
+               goto start;
+       }
+-      if (genpd->power_on) {
++      if (genpd->power_on)
+               ret = genpd->power_on(genpd);
+-              if (ret)
+-                      goto out;
+-      }
+-      genpd_set_active(genpd);
+-      if (parent)
+-              genpd_sd_counter_inc(parent);
++      if (ret) {
++              if (genpd->parent)
++                      genpd_sd_counter_dec(genpd->parent);
++      } else {
++              genpd_set_active(genpd);
++      }
+  out:
+       mutex_unlock(&genpd->lock);
+-      if (parent)
+-              genpd_release_lock(parent);
+       return ret;
+ }
+@@ -293,7 +290,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       genpd->poweroff_task = current;
+       list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+-              ret = __pm_genpd_save_device(dle, genpd);
++              ret = atomic_read(&genpd->sd_count) == 0 ?
++                      __pm_genpd_save_device(dle, genpd) : -EBUSY;
+               if (ret) {
+                       genpd_set_active(genpd);
+                       goto out;
+@@ -308,38 +306,32 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+               }
+       }
+-      parent = genpd->parent;
+-      if (parent) {
+-              mutex_unlock(&genpd->lock);
+-
+-              genpd_acquire_lock(parent);
+-              mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+-
+-              if (genpd_abort_poweroff(genpd)) {
+-                      genpd_release_lock(parent);
++      if (genpd->power_off) {
++              if (atomic_read(&genpd->sd_count) > 0) {
++                      ret = -EBUSY;
+                       goto out;
+               }
+-      }
+-      if (genpd->power_off) {
++              /*
++               * If sd_count > 0 at this point, one of the children hasn't
++               * managed to call pm_genpd_poweron() for the parent yet after
++               * incrementing it.  In that case pm_genpd_poweron() will wait
++               * for us to drop the lock, so we can call .power_off() and let
++               * the pm_genpd_poweron() restore power for us (this shouldn't
++               * happen very often).
++               */
+               ret = genpd->power_off(genpd);
+               if (ret == -EBUSY) {
+                       genpd_set_active(genpd);
+-                      if (parent)
+-                              genpd_release_lock(parent);
+-
+                       goto out;
+               }
+       }
+       genpd->status = GPD_STATE_POWER_OFF;
+-      if (parent) {
+-              if (genpd_sd_counter_dec(parent))
+-                      genpd_queue_power_off_work(parent);
+-
+-              genpd_release_lock(parent);
+-      }
++      parent = genpd->parent;
++      if (parent && genpd_sd_counter_dec(parent))
++              genpd_queue_power_off_work(parent);
+  out:
+       genpd->poweroff_task = NULL;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0046-PM-Domains-Make-pm_genpd_poweron-always-survive-pare.patch b/patches.runtime_pm/0046-PM-Domains-Make-pm_genpd_poweron-always-survive-pare.patch
new file mode 100644 (file)
index 0000000..51d7d40
--- /dev/null
@@ -0,0 +1,93 @@
+From f334d73383ca5539e6401bb3b696b036533471bd Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:22 +0200
+Subject: PM / Domains: Make pm_genpd_poweron() always survive parent removal
+
+If pm_genpd_remove_subdomain() is called to remove a PM domain's
+subdomain and pm_genpd_poweron() is called for that subdomain at
+the same time, and the pm_genpd_poweron() called by it recursively
+for the parent returns an error, the first pm_genpd_poweron()'s
+error code path will attempt to decrement the subdomain counter of
+a PM domain that it's not a subdomain of any more.
+
+Rearrange the code in pm_genpd_poweron() to prevent this from
+happening.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 9e08cf429697090d0fac57d493dc7b6de17a5eee)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index ef25b6f..dc423a9 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -89,12 +89,14 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
+  */
+ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
+-      struct generic_pm_domain *parent = genpd->parent;
++      struct generic_pm_domain *parent;
+       int ret = 0;
+- start:
+       mutex_lock(&genpd->lock);
++      parent = genpd->parent;
++
++ start:
+       if (genpd->status == GPD_STATE_ACTIVE
+           || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+               goto out;
+@@ -110,29 +112,34 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+               mutex_unlock(&genpd->lock);
+               ret = pm_genpd_poweron(parent);
+-              if (ret) {
+-                      genpd_sd_counter_dec(parent);
+-                      return ret;
+-              }
++
++              mutex_lock(&genpd->lock);
++
++              if (ret)
++                      goto err;
+               parent = NULL;
+               goto start;
+       }
+-      if (genpd->power_on)
++      if (genpd->power_on) {
+               ret = genpd->power_on(genpd);
+-
+-      if (ret) {
+-              if (genpd->parent)
+-                      genpd_sd_counter_dec(genpd->parent);
+-      } else {
+-              genpd_set_active(genpd);
++              if (ret)
++                      goto err;
+       }
++      genpd_set_active(genpd);
++
+  out:
+       mutex_unlock(&genpd->lock);
+       return ret;
++
++ err:
++      if (genpd->parent)
++              genpd_sd_counter_dec(genpd->parent);
++
++      goto out;
+ }
+ #endif /* CONFIG_PM */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0047-PM-Domains-Add-wait-for-parent-status-for-generic-PM.patch b/patches.runtime_pm/0047-PM-Domains-Add-wait-for-parent-status-for-generic-PM.patch
new file mode 100644 (file)
index 0000000..e428a18
--- /dev/null
@@ -0,0 +1,219 @@
+From 1388315fb6ce95ee5229a8d2f5e7355415205ba7 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:29 +0200
+Subject: PM / Domains: Add "wait for parent" status for generic PM domains
+
+The next patch will make it possible for a generic PM domain to have
+multiple parents (i.e. multiple PM domains it depends on).  To
+prepare for that change it is necessary to change pm_genpd_poweron()
+so that it doesn't jump to the start label after running itself
+recursively for the parent domain.  For this purpose, introduce a new
+PM domain status value GPD_STATE_WAIT_PARENT that will be set by
+pm_genpd_poweron() before calling itself recursively for the parent
+domain and modify the code in drivers/base/power/domain.c so that
+the GPD_STATE_WAIT_PARENT status is guaranteed to be preserved during
+the execution of pm_genpd_poweron() for the parent.
+
+This change also causes pm_genpd_add_subdomain() and
+pm_genpd_remove_subdomain() to wait for started pm_genpd_poweron() to
+complete and allows pm_genpd_runtime_resume() to avoid dropping the
+lock after powering on the PM domain.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 3f241775c30365c33a0d2f6d40f4cf12470f48c6)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   90 ++++++++++++++++++++++++++++---------------
+ include/linux/pm_domain.h   |    1 +
+ 2 files changed, 61 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index dc423a9..1f4b132 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -81,45 +81,59 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
+ }
+ /**
+- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
++ * __pm_genpd_poweron - Restore power to a given PM domain and its parents.
+  * @genpd: PM domain to power up.
+  *
+  * Restore power to @genpd and all of its parents so that it is possible to
+  * resume a device belonging to it.
+  */
+-int pm_genpd_poweron(struct generic_pm_domain *genpd)
++int __pm_genpd_poweron(struct generic_pm_domain *genpd)
++      __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+-      struct generic_pm_domain *parent;
++      DEFINE_WAIT(wait);
+       int ret = 0;
+-      mutex_lock(&genpd->lock);
++      /* If the domain's parent is being waited for, we have to wait too. */
++      for (;;) {
++              prepare_to_wait(&genpd->status_wait_queue, &wait,
++                              TASK_UNINTERRUPTIBLE);
++              if (genpd->status != GPD_STATE_WAIT_PARENT)
++                      break;
++              mutex_unlock(&genpd->lock);
+-      parent = genpd->parent;
++              schedule();
++
++              mutex_lock(&genpd->lock);
++      }
++      finish_wait(&genpd->status_wait_queue, &wait);
+- start:
+       if (genpd->status == GPD_STATE_ACTIVE
+           || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+-              goto out;
++              return 0;
+       if (genpd->status != GPD_STATE_POWER_OFF) {
+               genpd_set_active(genpd);
+-              goto out;
++              return 0;
+       }
+-      if (parent) {
+-              genpd_sd_counter_inc(parent);
++      if (genpd->parent) {
++              genpd_sd_counter_inc(genpd->parent);
++              genpd->status = GPD_STATE_WAIT_PARENT;
+               mutex_unlock(&genpd->lock);
+-              ret = pm_genpd_poweron(parent);
++              ret = pm_genpd_poweron(genpd->parent);
+               mutex_lock(&genpd->lock);
++              /*
++               * The "wait for parent" status is guaranteed not to change
++               * while the parent is powering on.
++               */
++              genpd->status = GPD_STATE_POWER_OFF;
++              wake_up_all(&genpd->status_wait_queue);
+               if (ret)
+                       goto err;
+-
+-              parent = NULL;
+-              goto start;
+       }
+       if (genpd->power_on) {
+@@ -130,16 +144,27 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+       genpd_set_active(genpd);
+- out:
+-      mutex_unlock(&genpd->lock);
+-
+-      return ret;
++      return 0;
+  err:
+       if (genpd->parent)
+               genpd_sd_counter_dec(genpd->parent);
+-      goto out;
++      return ret;
++}
++
++/**
++ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
++ * @genpd: PM domain to power up.
++ */
++int pm_genpd_poweron(struct generic_pm_domain *genpd)
++{
++      int ret;
++
++      mutex_lock(&genpd->lock);
++      ret = __pm_genpd_poweron(genpd);
++      mutex_unlock(&genpd->lock);
++      return ret;
+ }
+ #endif /* CONFIG_PM */
+@@ -225,7 +250,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+  */
+ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+ {
+-      return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
++      return genpd->status == GPD_STATE_WAIT_PARENT
++              || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+ }
+ /**
+@@ -261,11 +287,13 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       /*
+        * Do not try to power off the domain in the following situations:
+        * (1) The domain is already in the "power off" state.
+-       * (2) System suspend is in progress.
++       * (2) The domain is waiting for its parent to power up.
+        * (3) One of the domain's devices is being resumed right now.
++       * (4) System suspend is in progress.
+        */
+-      if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+-          || genpd->resume_count > 0)
++      if (genpd->status == GPD_STATE_POWER_OFF
++          || genpd->status == GPD_STATE_WAIT_PARENT
++          || genpd->resume_count > 0 || genpd->prepared_count > 0)
+               return 0;
+       if (atomic_read(&genpd->sd_count) > 0)
+@@ -299,14 +327,15 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+               ret = atomic_read(&genpd->sd_count) == 0 ?
+                       __pm_genpd_save_device(dle, genpd) : -EBUSY;
++
++              if (genpd_abort_poweroff(genpd))
++                      goto out;
++
+               if (ret) {
+                       genpd_set_active(genpd);
+                       goto out;
+               }
+-              if (genpd_abort_poweroff(genpd))
+-                      goto out;
+-
+               if (genpd->status == GPD_STATE_REPEAT) {
+                       genpd->poweroff_task = NULL;
+                       goto start;
+@@ -432,11 +461,12 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      ret = pm_genpd_poweron(genpd);
+-      if (ret)
+-              return ret;
+-
+       mutex_lock(&genpd->lock);
++      ret = __pm_genpd_poweron(genpd);
++      if (ret) {
++              mutex_unlock(&genpd->lock);
++              return ret;
++      }
+       genpd->status = GPD_STATE_BUSY;
+       genpd->resume_count++;
+       for (;;) {
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 81c5782..97e3f8e 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -13,6 +13,7 @@
+ enum gpd_status {
+       GPD_STATE_ACTIVE = 0,   /* PM domain is active */
++      GPD_STATE_WAIT_PARENT,  /* PM domain's parent is being waited for */
+       GPD_STATE_BUSY,         /* Something is happening to the PM domain */
+       GPD_STATE_REPEAT,       /* Power off in progress, to be repeated */
+       GPD_STATE_POWER_OFF,    /* PM domain is off */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0048-PM-Domains-Allow-generic-PM-domains-to-have-multiple.patch b/patches.runtime_pm/0048-PM-Domains-Allow-generic-PM-domains-to-have-multiple.patch
new file mode 100644 (file)
index 0000000..f1bd964
--- /dev/null
@@ -0,0 +1,329 @@
+From 1645042457f83c61cdc0c08a6e2e493d2bdb9f0c Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:40 +0200
+Subject: PM / Domains: Allow generic PM domains to have multiple masters
+
+Currently, for a given generic PM domain there may be only one parent
+domain (i.e. a PM domain it depends on).  However, there is at least
+one real-life case in which there should be two parents (masters) for
+one PM domain (the A3RV domain on SH7372 turns out to depend on the
+A4LC domain and it depends on the A4R domain at the same time). For
+this reason, allow a PM domain to have multiple parents (masters) by
+introducing objects representing links between PM domains.
+
+The (logical) links between PM domains represent relationships in
+which one domain is a master (i.e. it is depended on) and another
+domain is a slave (i.e. it depends on the master) with the rule that
+the slave cannot be powered on if the master is not powered on and
+the master cannot be powered off if the slave is not powered off.
+Each struct generic_pm_domain object representing a PM domain has
+two lists of links, a list of links in which it is a master and
+a list of links in which it is a slave.  The first of these lists
+replaces the list of subdomains and the second one is used in place
+of the parent pointer.
+
+Each link is represented by struct gpd_link object containing
+pointers to the master and the slave and two struct list_head
+members allowing it to hook into two lists (the master's list
+of "master" links and the slave's list of "slave" links).  This
+allows the code to get to the link from each side (either from
+the master or from the slave) and follow it in each direction.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 5063ce1571b73865cbdcd92db002e85809750c97)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   99 +++++++++++++++++++++++++------------------
+ include/linux/pm_domain.h   |   12 ++++--
+ 2 files changed, 67 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 1f4b132..8fc538d 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -81,19 +81,20 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
+ }
+ /**
+- * __pm_genpd_poweron - Restore power to a given PM domain and its parents.
++ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
+  * @genpd: PM domain to power up.
+  *
+- * Restore power to @genpd and all of its parents so that it is possible to
++ * Restore power to @genpd and all of its masters so that it is possible to
+  * resume a device belonging to it.
+  */
+ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
++      struct gpd_link *link;
+       DEFINE_WAIT(wait);
+       int ret = 0;
+-      /* If the domain's parent is being waited for, we have to wait too. */
++      /* If the domain's master is being waited for, we have to wait too. */
+       for (;;) {
+               prepare_to_wait(&genpd->status_wait_queue, &wait,
+                               TASK_UNINTERRUPTIBLE);
+@@ -116,24 +117,31 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+               return 0;
+       }
+-      if (genpd->parent) {
+-              genpd_sd_counter_inc(genpd->parent);
++      /*
++       * The list is guaranteed not to change while the loop below is being
++       * executed, unless one of the masters' .power_on() callbacks fiddles
++       * with it.
++       */
++      list_for_each_entry(link, &genpd->slave_links, slave_node) {
++              genpd_sd_counter_inc(link->master);
+               genpd->status = GPD_STATE_WAIT_PARENT;
+               mutex_unlock(&genpd->lock);
+-              ret = pm_genpd_poweron(genpd->parent);
++              ret = pm_genpd_poweron(link->master);
+               mutex_lock(&genpd->lock);
+               /*
+                * The "wait for parent" status is guaranteed not to change
+-               * while the parent is powering on.
++               * while the master is powering on.
+                */
+               genpd->status = GPD_STATE_POWER_OFF;
+               wake_up_all(&genpd->status_wait_queue);
+-              if (ret)
++              if (ret) {
++                      genpd_sd_counter_dec(link->master);
+                       goto err;
++              }
+       }
+       if (genpd->power_on) {
+@@ -147,14 +155,14 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+       return 0;
+  err:
+-      if (genpd->parent)
+-              genpd_sd_counter_dec(genpd->parent);
++      list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
++              genpd_sd_counter_dec(link->master);
+       return ret;
+ }
+ /**
+- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
++ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+  * @genpd: PM domain to power up.
+  */
+ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+@@ -278,8 +286,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+-      struct generic_pm_domain *parent;
+       struct dev_list_entry *dle;
++      struct gpd_link *link;
+       unsigned int not_suspended;
+       int ret = 0;
+@@ -287,7 +295,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       /*
+        * Do not try to power off the domain in the following situations:
+        * (1) The domain is already in the "power off" state.
+-       * (2) The domain is waiting for its parent to power up.
++       * (2) The domain is waiting for its master to power up.
+        * (3) One of the domain's devices is being resumed right now.
+        * (4) System suspend is in progress.
+        */
+@@ -349,8 +357,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+               }
+               /*
+-               * If sd_count > 0 at this point, one of the children hasn't
+-               * managed to call pm_genpd_poweron() for the parent yet after
++               * If sd_count > 0 at this point, one of the subdomains hasn't
++               * managed to call pm_genpd_poweron() for the master yet after
+                * incrementing it.  In that case pm_genpd_poweron() will wait
+                * for us to drop the lock, so we can call .power_off() and let
+                * the pm_genpd_poweron() restore power for us (this shouldn't
+@@ -365,9 +373,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       genpd->status = GPD_STATE_POWER_OFF;
+-      parent = genpd->parent;
+-      if (parent && genpd_sd_counter_dec(parent))
+-              genpd_queue_power_off_work(parent);
++      list_for_each_entry(link, &genpd->slave_links, slave_node) {
++              genpd_sd_counter_dec(link->master);
++              genpd_queue_power_off_work(link->master);
++      }
+  out:
+       genpd->poweroff_task = NULL;
+@@ -527,11 +536,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
+ #ifdef CONFIG_PM_SLEEP
+ /**
+- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
++ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
+  * @genpd: PM domain to power off, if possible.
+  *
+  * Check if the given PM domain can be powered off (during system suspend or
+- * hibernation) and do that if so.  Also, in that case propagate to its parent.
++ * hibernation) and do that if so.  Also, in that case propagate to its masters.
+  *
+  * This function is only called in "noirq" stages of system power transitions,
+  * so it need not acquire locks (all of the "noirq" callbacks are executed
+@@ -539,7 +548,7 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
+  */
+ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+ {
+-      struct generic_pm_domain *parent = genpd->parent;
++      struct gpd_link *link;
+       if (genpd->status == GPD_STATE_POWER_OFF)
+               return;
+@@ -552,9 +561,10 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+               genpd->power_off(genpd);
+       genpd->status = GPD_STATE_POWER_OFF;
+-      if (parent) {
+-              genpd_sd_counter_dec(parent);
+-              pm_genpd_sync_poweroff(parent);
++
++      list_for_each_entry(link, &genpd->slave_links, slave_node) {
++              genpd_sd_counter_dec(link->master);
++              pm_genpd_sync_poweroff(link->master);
+       }
+ }
+@@ -1173,7 +1183,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+                          struct generic_pm_domain *new_subdomain)
+ {
+-      struct generic_pm_domain *subdomain;
++      struct gpd_link *link;
+       int ret = 0;
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+@@ -1196,16 +1206,23 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+               goto out;
+       }
+-      list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+-              if (subdomain == new_subdomain) {
++      list_for_each_entry(link, &genpd->slave_links, slave_node) {
++              if (link->slave == new_subdomain && link->master == genpd) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+-      list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+-      new_subdomain->parent = genpd;
+-      if (subdomain->status != GPD_STATE_POWER_OFF)
++      link = kzalloc(sizeof(*link), GFP_KERNEL);
++      if (!link) {
++              ret = -ENOMEM;
++              goto out;
++      }
++      link->master = genpd;
++      list_add_tail(&link->master_node, &genpd->master_links);
++      link->slave = new_subdomain;
++      list_add_tail(&link->slave_node, &new_subdomain->slave_links);
++      if (new_subdomain->status != GPD_STATE_POWER_OFF)
+               genpd_sd_counter_inc(genpd);
+  out:
+@@ -1218,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ /**
+  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+  * @genpd: Master PM domain to remove the subdomain from.
+- * @target: Subdomain to be removed.
++ * @subdomain: Subdomain to be removed.
+  */
+ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+-                            struct generic_pm_domain *target)
++                            struct generic_pm_domain *subdomain)
+ {
+-      struct generic_pm_domain *subdomain;
++      struct gpd_link *link;
+       int ret = -EINVAL;
+-      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
++      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+               return -EINVAL;
+  start:
+       genpd_acquire_lock(genpd);
+-      list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+-              if (subdomain != target)
++      list_for_each_entry(link, &genpd->master_links, master_node) {
++              if (link->slave != subdomain)
+                       continue;
+               mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+@@ -1245,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+                       goto start;
+               }
+-              list_del(&subdomain->sd_node);
+-              subdomain->parent = NULL;
++              list_del(&link->master_node);
++              list_del(&link->slave_node);
++              kfree(link);
+               if (subdomain->status != GPD_STATE_POWER_OFF)
+                       genpd_sd_counter_dec(genpd);
+@@ -1273,10 +1291,9 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       if (IS_ERR_OR_NULL(genpd))
+               return;
+-      INIT_LIST_HEAD(&genpd->sd_node);
+-      genpd->parent = NULL;
++      INIT_LIST_HEAD(&genpd->master_links);
++      INIT_LIST_HEAD(&genpd->slave_links);
+       INIT_LIST_HEAD(&genpd->dev_list);
+-      INIT_LIST_HEAD(&genpd->sd_list);
+       mutex_init(&genpd->lock);
+       genpd->gov = gov;
+       INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 97e3f8e..5f5154d 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -26,9 +26,8 @@ struct dev_power_governor {
+ struct generic_pm_domain {
+       struct dev_pm_domain domain;    /* PM domain operations */
+       struct list_head gpd_list_node; /* Node in the global PM domains list */
+-      struct list_head sd_node;       /* Node in the parent's subdomain list */
+-      struct generic_pm_domain *parent;       /* Parent PM domain */
+-      struct list_head sd_list;       /* List of dubdomains */
++      struct list_head master_links;  /* Links with PM domain as a master */
++      struct list_head slave_links;   /* Links with PM domain as a slave */
+       struct list_head dev_list;      /* List of devices */
+       struct mutex lock;
+       struct dev_power_governor *gov;
+@@ -55,6 +54,13 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+       return container_of(pd, struct generic_pm_domain, domain);
+ }
++struct gpd_link {
++      struct generic_pm_domain *master;
++      struct list_head master_node;
++      struct generic_pm_domain *slave;
++      struct list_head slave_node;
++};
++
+ struct dev_list_entry {
+       struct list_head node;
+       struct device *dev;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0049-PM-Domains-Rename-GPD_STATE_WAIT_PARENT-to-GPD_STATE.patch b/patches.runtime_pm/0049-PM-Domains-Rename-GPD_STATE_WAIT_PARENT-to-GPD_STATE.patch
new file mode 100644 (file)
index 0000000..4b6f86b
--- /dev/null
@@ -0,0 +1,74 @@
+From 3aec31f6bf2c28a8f9ace3173a02b18bbc76e75e Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:50 +0200
+Subject: PM / Domains: Rename GPD_STATE_WAIT_PARENT to GPD_STATE_WAIT_MASTER
+
+Since it is now possible for a PM domain to have multiple masters
+instead of one parent, rename the "wait for parent" status to reflect
+the new situation.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 17877eb5a900f32bb5827a7b2109b6c9adff5fc3)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    8 ++++----
+ include/linux/pm_domain.h   |    2 +-
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 8fc538d..c06f8f8 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -98,7 +98,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+       for (;;) {
+               prepare_to_wait(&genpd->status_wait_queue, &wait,
+                               TASK_UNINTERRUPTIBLE);
+-              if (genpd->status != GPD_STATE_WAIT_PARENT)
++              if (genpd->status != GPD_STATE_WAIT_MASTER)
+                       break;
+               mutex_unlock(&genpd->lock);
+@@ -124,7 +124,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+        */
+       list_for_each_entry(link, &genpd->slave_links, slave_node) {
+               genpd_sd_counter_inc(link->master);
+-              genpd->status = GPD_STATE_WAIT_PARENT;
++              genpd->status = GPD_STATE_WAIT_MASTER;
+               mutex_unlock(&genpd->lock);
+@@ -258,7 +258,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+  */
+ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+ {
+-      return genpd->status == GPD_STATE_WAIT_PARENT
++      return genpd->status == GPD_STATE_WAIT_MASTER
+               || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+ }
+@@ -300,7 +300,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+        * (4) System suspend is in progress.
+        */
+       if (genpd->status == GPD_STATE_POWER_OFF
+-          || genpd->status == GPD_STATE_WAIT_PARENT
++          || genpd->status == GPD_STATE_WAIT_MASTER
+           || genpd->resume_count > 0 || genpd->prepared_count > 0)
+               return 0;
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 5f5154d..bf679f5 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -13,7 +13,7 @@
+ enum gpd_status {
+       GPD_STATE_ACTIVE = 0,   /* PM domain is active */
+-      GPD_STATE_WAIT_PARENT,  /* PM domain's parent is being waited for */
++      GPD_STATE_WAIT_MASTER,  /* PM domain's master is being waited for */
+       GPD_STATE_BUSY,         /* Something is happening to the PM domain */
+       GPD_STATE_REPEAT,       /* Power off in progress, to be repeated */
+       GPD_STATE_POWER_OFF,    /* PM domain is off */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0050-PM-Domains-Rename-argument-of-pm_genpd_add_subdomain.patch b/patches.runtime_pm/0050-PM-Domains-Rename-argument-of-pm_genpd_add_subdomain.patch
new file mode 100644 (file)
index 0000000..94129d9
--- /dev/null
@@ -0,0 +1,88 @@
+From 7cd4fbe5c104115c16e8fdac190827e8eef704b1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 8 Aug 2011 23:43:59 +0200
+Subject: PM / Domains: Rename argument of pm_genpd_add_subdomain()
+
+Change the name of the second argument of pm_genpd_add_subdomain()
+so that it is (a) shorter and (b) in agreement with the name of
+the second argument of pm_genpd_remove_subdomain().
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit bc0403ff16e5305c3a14c2b0826616ceaabbf058)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index c06f8f8..1fc6cc9 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1178,36 +1178,36 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ /**
+  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+  * @genpd: Master PM domain to add the subdomain to.
+- * @new_subdomain: Subdomain to be added.
++ * @subdomain: Subdomain to be added.
+  */
+ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+-                         struct generic_pm_domain *new_subdomain)
++                         struct generic_pm_domain *subdomain)
+ {
+       struct gpd_link *link;
+       int ret = 0;
+-      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
++      if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+               return -EINVAL;
+  start:
+       genpd_acquire_lock(genpd);
+-      mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
++      mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+-      if (new_subdomain->status != GPD_STATE_POWER_OFF
+-          && new_subdomain->status != GPD_STATE_ACTIVE) {
+-              mutex_unlock(&new_subdomain->lock);
++      if (subdomain->status != GPD_STATE_POWER_OFF
++          && subdomain->status != GPD_STATE_ACTIVE) {
++              mutex_unlock(&subdomain->lock);
+               genpd_release_lock(genpd);
+               goto start;
+       }
+       if (genpd->status == GPD_STATE_POWER_OFF
+-          &&  new_subdomain->status != GPD_STATE_POWER_OFF) {
++          &&  subdomain->status != GPD_STATE_POWER_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+       list_for_each_entry(link, &genpd->slave_links, slave_node) {
+-              if (link->slave == new_subdomain && link->master == genpd) {
++              if (link->slave == subdomain && link->master == genpd) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+@@ -1220,13 +1220,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+       }
+       link->master = genpd;
+       list_add_tail(&link->master_node, &genpd->master_links);
+-      link->slave = new_subdomain;
+-      list_add_tail(&link->slave_node, &new_subdomain->slave_links);
+-      if (new_subdomain->status != GPD_STATE_POWER_OFF)
++      link->slave = subdomain;
++      list_add_tail(&link->slave_node, &subdomain->slave_links);
++      if (subdomain->status != GPD_STATE_POWER_OFF)
+               genpd_sd_counter_inc(genpd);
+  out:
+-      mutex_unlock(&new_subdomain->lock);
++      mutex_unlock(&subdomain->lock);
+       genpd_release_lock(genpd);
+       return ret;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0051-PM-Introduce-struct-pm_subsys_data.patch b/patches.runtime_pm/0051-PM-Introduce-struct-pm_subsys_data.patch
new file mode 100644 (file)
index 0000000..c1ac2c9
--- /dev/null
@@ -0,0 +1,391 @@
+From 308693a5a7a06e1c48d67441c0fc0f9a9e8538c8 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 25 Aug 2011 15:33:50 +0200
+Subject: PM: Introduce struct pm_subsys_data
+
+Introduce struct pm_subsys_data that may be subclassed by subsystems
+to store subsystem-specific information related to the device.  Move
+the clock management fields accessed through the power.subsys_data
+pointer in struct device to the new structure.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 5c095a0e0d600d5a5a4207eaadabd18db46395ce)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/clock_ops.c |  122 +++++++++++++++++++++-------------------
+ include/linux/device.h         |    5 ++
+ include/linux/pm.h             |    9 ++-
+ include/linux/pm_runtime.h     |    8 ++-
+ 4 files changed, 83 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index 2c18d58..b7f1db4 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -17,11 +17,6 @@
+ #ifdef CONFIG_PM
+-struct pm_clk_data {
+-      struct list_head clock_list;
+-      spinlock_t lock;
+-};
+-
+ enum pce_status {
+       PCE_STATUS_NONE = 0,
+       PCE_STATUS_ACQUIRED,
+@@ -36,11 +31,6 @@ struct pm_clock_entry {
+       enum pce_status status;
+ };
+-static struct pm_clk_data *__to_pcd(struct device *dev)
+-{
+-      return dev ? dev->power.subsys_data : NULL;
+-}
+-
+ /**
+  * pm_clk_add - Start using a device clock for power management.
+  * @dev: Device whose clock is going to be used for power management.
+@@ -51,10 +41,10 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
+  */
+ int pm_clk_add(struct device *dev, const char *con_id)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce;
+-      if (!pcd)
++      if (!psd)
+               return -EINVAL;
+       ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+@@ -73,9 +63,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
+               }
+       }
+-      spin_lock_irq(&pcd->lock);
+-      list_add_tail(&ce->node, &pcd->clock_list);
+-      spin_unlock_irq(&pcd->lock);
++      spin_lock_irq(&psd->lock);
++      list_add_tail(&ce->node, &psd->clock_list);
++      spin_unlock_irq(&psd->lock);
+       return 0;
+ }
+@@ -117,15 +107,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
+  */
+ void pm_clk_remove(struct device *dev, const char *con_id)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce;
+-      if (!pcd)
++      if (!psd)
+               return;
+-      spin_lock_irq(&pcd->lock);
++      spin_lock_irq(&psd->lock);
+-      list_for_each_entry(ce, &pcd->clock_list, node) {
++      list_for_each_entry(ce, &psd->clock_list, node) {
+               if (!con_id && !ce->con_id) {
+                       __pm_clk_remove(ce);
+                       break;
+@@ -137,29 +127,45 @@ void pm_clk_remove(struct device *dev, const char *con_id)
+               }
+       }
+-      spin_unlock_irq(&pcd->lock);
++      spin_unlock_irq(&psd->lock);
+ }
+ /**
+  * pm_clk_init - Initialize a device's list of power management clocks.
+  * @dev: Device to initialize the list of PM clocks for.
+  *
+- * Allocate a struct pm_clk_data object, initialize its lock member and
+- * make the @dev's power.subsys_data field point to it.
++ * Initialize the lock and clock_list members of the device's pm_subsys_data
++ * object.
+  */
+-int pm_clk_init(struct device *dev)
++void pm_clk_init(struct device *dev)
+ {
+-      struct pm_clk_data *pcd;
++      struct pm_subsys_data *psd = dev_to_psd(dev);
++
++      if (!psd)
++              return;
+-      pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+-      if (!pcd) {
++      INIT_LIST_HEAD(&psd->clock_list);
++      spin_lock_init(&psd->lock);
++}
++
++/**
++ * pm_clk_create - Create and initialize a device's list of PM clocks.
++ * @dev: Device to create and initialize the list of PM clocks for.
++ *
++ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
++ * members and make the @dev's power.subsys_data field point to it.
++ */
++int pm_clk_create(struct device *dev)
++{
++      struct pm_subsys_data *psd;
++
++      psd = kzalloc(sizeof(*psd), GFP_KERNEL);
++      if (!psd) {
+               dev_err(dev, "Not enough memory for PM clock data.\n");
+               return -ENOMEM;
+       }
+-
+-      INIT_LIST_HEAD(&pcd->clock_list);
+-      spin_lock_init(&pcd->lock);
+-      dev->power.subsys_data = pcd;
++      dev->power.subsys_data = psd;
++      pm_clk_init(dev);
+       return 0;
+ }
+@@ -168,27 +174,27 @@ int pm_clk_init(struct device *dev)
+  * @dev: Device to destroy the list of PM clocks for.
+  *
+  * Clear the @dev's power.subsys_data field, remove the list of clock entries
+- * from the struct pm_clk_data object pointed to by it before and free
++ * from the struct pm_subsys_data object pointed to by it before and free
+  * that object.
+  */
+ void pm_clk_destroy(struct device *dev)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce, *c;
+-      if (!pcd)
++      if (!psd)
+               return;
+       dev->power.subsys_data = NULL;
+-      spin_lock_irq(&pcd->lock);
++      spin_lock_irq(&psd->lock);
+-      list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
++      list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+               __pm_clk_remove(ce);
+-      spin_unlock_irq(&pcd->lock);
++      spin_unlock_irq(&psd->lock);
+-      kfree(pcd);
++      kfree(psd);
+ }
+ #endif /* CONFIG_PM */
+@@ -218,18 +224,18 @@ static void pm_clk_acquire(struct device *dev,
+  */
+ int pm_clk_suspend(struct device *dev)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce;
+       unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+-      if (!pcd)
++      if (!psd)
+               return 0;
+-      spin_lock_irqsave(&pcd->lock, flags);
++      spin_lock_irqsave(&psd->lock, flags);
+-      list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
++      list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+               if (ce->status == PCE_STATUS_NONE)
+                       pm_clk_acquire(dev, ce);
+@@ -239,7 +245,7 @@ int pm_clk_suspend(struct device *dev)
+               }
+       }
+-      spin_unlock_irqrestore(&pcd->lock, flags);
++      spin_unlock_irqrestore(&psd->lock, flags);
+       return 0;
+ }
+@@ -250,18 +256,18 @@ int pm_clk_suspend(struct device *dev)
+  */
+ int pm_clk_resume(struct device *dev)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce;
+       unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+-      if (!pcd)
++      if (!psd)
+               return 0;
+-      spin_lock_irqsave(&pcd->lock, flags);
++      spin_lock_irqsave(&psd->lock, flags);
+-      list_for_each_entry(ce, &pcd->clock_list, node) {
++      list_for_each_entry(ce, &psd->clock_list, node) {
+               if (ce->status == PCE_STATUS_NONE)
+                       pm_clk_acquire(dev, ce);
+@@ -271,7 +277,7 @@ int pm_clk_resume(struct device *dev)
+               }
+       }
+-      spin_unlock_irqrestore(&pcd->lock, flags);
++      spin_unlock_irqrestore(&psd->lock, flags);
+       return 0;
+ }
+@@ -309,7 +315,7 @@ static int pm_clk_notify(struct notifier_block *nb,
+               if (dev->pm_domain)
+                       break;
+-              error = pm_clk_init(dev);
++              error = pm_clk_create(dev);
+               if (error)
+                       break;
+@@ -344,22 +350,22 @@ static int pm_clk_notify(struct notifier_block *nb,
+  */
+ int pm_clk_suspend(struct device *dev)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce;
+       unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+       /* If there is no driver, the clocks are already disabled. */
+-      if (!pcd || !dev->driver)
++      if (!psd || !dev->driver)
+               return 0;
+-      spin_lock_irqsave(&pcd->lock, flags);
++      spin_lock_irqsave(&psd->lock, flags);
+-      list_for_each_entry_reverse(ce, &pcd->clock_list, node)
++      list_for_each_entry_reverse(ce, &psd->clock_list, node)
+               clk_disable(ce->clk);
+-      spin_unlock_irqrestore(&pcd->lock, flags);
++      spin_unlock_irqrestore(&psd->lock, flags);
+       return 0;
+ }
+@@ -370,22 +376,22 @@ int pm_clk_suspend(struct device *dev)
+  */
+ int pm_clk_resume(struct device *dev)
+ {
+-      struct pm_clk_data *pcd = __to_pcd(dev);
++      struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce;
+       unsigned long flags;
+       dev_dbg(dev, "%s()\n", __func__);
+       /* If there is no driver, the clocks should remain disabled. */
+-      if (!pcd || !dev->driver)
++      if (!psd || !dev->driver)
+               return 0;
+-      spin_lock_irqsave(&pcd->lock, flags);
++      spin_lock_irqsave(&psd->lock, flags);
+-      list_for_each_entry(ce, &pcd->clock_list, node)
++      list_for_each_entry(ce, &psd->clock_list, node)
+               clk_enable(ce->clk);
+-      spin_unlock_irqrestore(&pcd->lock, flags);
++      spin_unlock_irqrestore(&psd->lock, flags);
+       return 0;
+ }
+diff --git a/include/linux/device.h b/include/linux/device.h
+index ad8ecfd..4980cc0 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -706,6 +706,11 @@ static inline void set_dev_node(struct device *dev, int node)
+ }
+ #endif
++static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
++{
++      return dev ? dev->power.subsys_data : NULL;
++}
++
+ static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
+ {
+       return dev->kobj.uevent_suppress;
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 18de9f8..1137f99 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -423,6 +423,13 @@ enum rpm_request {
+ struct wakeup_source;
++struct pm_subsys_data {
++      spinlock_t lock;
++#ifdef CONFIG_PM_CLK
++      struct list_head clock_list;
++#endif
++};
++
+ struct dev_pm_info {
+       pm_message_t            power_state;
+       unsigned int            can_wakeup:1;
+@@ -464,7 +471,7 @@ struct dev_pm_info {
+       unsigned long           suspended_jiffies;
+       unsigned long           accounting_timestamp;
+ #endif
+-      void                    *subsys_data;  /* Owned by the subsystem. */
++      struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
+ };
+ extern void update_pm_runtime_accounting(struct device *dev);
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index daac05d..6b90630e 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -258,14 +258,18 @@ struct pm_clk_notifier_block {
+ };
+ #ifdef CONFIG_PM_CLK
+-extern int pm_clk_init(struct device *dev);
++extern void pm_clk_init(struct device *dev);
++extern int pm_clk_create(struct device *dev);
+ extern void pm_clk_destroy(struct device *dev);
+ extern int pm_clk_add(struct device *dev, const char *con_id);
+ extern void pm_clk_remove(struct device *dev, const char *con_id);
+ extern int pm_clk_suspend(struct device *dev);
+ extern int pm_clk_resume(struct device *dev);
+ #else
+-static inline int pm_clk_init(struct device *dev)
++static inline void pm_clk_init(struct device *dev)
++{
++}
++static inline int pm_clk_create(struct device *dev)
+ {
+       return -EINVAL;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0052-PM-Reference-counting-of-power.subsys_data.patch b/patches.runtime_pm/0052-PM-Reference-counting-of-power.subsys_data.patch
new file mode 100644 (file)
index 0000000..9d9bd58
--- /dev/null
@@ -0,0 +1,207 @@
+From c62f2b207cc15ead1512b09207525defda8191d1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 25 Aug 2011 15:34:01 +0200
+Subject: PM: Reference counting of power.subsys_data
+
+Since the power.subsys_data device field will be used by multiple
+filesystems, introduce a reference counting mechanism for it to avoid
+freeing it prematurely or changing its value at a wrong time.
+
+Make the PM clocks management code that currently is the only user of
+power.subsys_data use the new reference counting.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit ef27bed1870dbd5fd363ff5ec51eebd5a695e277)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/Makefile    |    2 +-
+ drivers/base/power/clock_ops.c |   24 +++--------
+ drivers/base/power/common.c    |   87 ++++++++++++++++++++++++++++++++++++++++
+ include/linux/pm.h             |    3 ++
+ 4 files changed, 96 insertions(+), 20 deletions(-)
+ create mode 100644 drivers/base/power/common.c
+
+diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
+index 2639ae7..6488ce1 100644
+--- a/drivers/base/power/Makefile
++++ b/drivers/base/power/Makefile
+@@ -1,4 +1,4 @@
+-obj-$(CONFIG_PM)      += sysfs.o generic_ops.o
++obj-$(CONFIG_PM)      += sysfs.o generic_ops.o common.o
+ obj-$(CONFIG_PM_SLEEP)        += main.o wakeup.o
+ obj-$(CONFIG_PM_RUNTIME)      += runtime.o
+ obj-$(CONFIG_PM_TRACE_RTC)    += trace.o
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index b7f1db4..8383e24 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -140,12 +140,8 @@ void pm_clk_remove(struct device *dev, const char *con_id)
+ void pm_clk_init(struct device *dev)
+ {
+       struct pm_subsys_data *psd = dev_to_psd(dev);
+-
+-      if (!psd)
+-              return;
+-
+-      INIT_LIST_HEAD(&psd->clock_list);
+-      spin_lock_init(&psd->lock);
++      if (psd)
++              INIT_LIST_HEAD(&psd->clock_list);
+ }
+ /**
+@@ -157,16 +153,8 @@ void pm_clk_init(struct device *dev)
+  */
+ int pm_clk_create(struct device *dev)
+ {
+-      struct pm_subsys_data *psd;
+-
+-      psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+-      if (!psd) {
+-              dev_err(dev, "Not enough memory for PM clock data.\n");
+-              return -ENOMEM;
+-      }
+-      dev->power.subsys_data = psd;
+-      pm_clk_init(dev);
+-      return 0;
++      int ret = dev_pm_get_subsys_data(dev);
++      return ret < 0 ? ret : 0;
+ }
+ /**
+@@ -185,8 +173,6 @@ void pm_clk_destroy(struct device *dev)
+       if (!psd)
+               return;
+-      dev->power.subsys_data = NULL;
+-
+       spin_lock_irq(&psd->lock);
+       list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+@@ -194,7 +180,7 @@ void pm_clk_destroy(struct device *dev)
+       spin_unlock_irq(&psd->lock);
+-      kfree(psd);
++      dev_pm_put_subsys_data(dev);
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
+new file mode 100644
+index 0000000..d398cf0
+--- /dev/null
++++ b/drivers/base/power/common.c
+@@ -0,0 +1,87 @@
++/*
++ * drivers/base/power/common.c - Common device power management code.
++ *
++ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
++ *
++ * This file is released under the GPLv2.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/pm_runtime.h>
++
++/**
++ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
++ * @dev: Device to handle.
++ *
++ * If power.subsys_data is NULL, point it to a new object, otherwise increment
++ * its reference counter.  Return 1 if a new object has been created, otherwise
++ * return 0 or error code.
++ */
++int dev_pm_get_subsys_data(struct device *dev)
++{
++      struct pm_subsys_data *psd;
++      int ret = 0;
++
++      psd = kzalloc(sizeof(*psd), GFP_KERNEL);
++      if (!psd)
++              return -ENOMEM;
++
++      spin_lock_irq(&dev->power.lock);
++
++      if (dev->power.subsys_data) {
++              dev->power.subsys_data->refcount++;
++      } else {
++              spin_lock_init(&psd->lock);
++              psd->refcount = 1;
++              dev->power.subsys_data = psd;
++              pm_clk_init(dev);
++              psd = NULL;
++              ret = 1;
++      }
++
++      spin_unlock_irq(&dev->power.lock);
++
++      /* kfree() verifies that its argument is nonzero. */
++      kfree(psd);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
++
++/**
++ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
++ * @dev: Device to handle.
++ *
++ * If the reference counter of power.subsys_data is zero after dropping the
++ * reference, power.subsys_data is removed.  Return 1 if that happens or 0
++ * otherwise.
++ */
++int dev_pm_put_subsys_data(struct device *dev)
++{
++      struct pm_subsys_data *psd;
++      int ret = 0;
++
++      spin_lock_irq(&dev->power.lock);
++
++      psd = dev_to_psd(dev);
++      if (!psd) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      if (--psd->refcount == 0) {
++              dev->power.subsys_data = NULL;
++              kfree(psd);
++              ret = 1;
++      }
++
++ out:
++      spin_unlock_irq(&dev->power.lock);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 1137f99..5b9b36f 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -425,6 +425,7 @@ struct wakeup_source;
+ struct pm_subsys_data {
+       spinlock_t lock;
++      unsigned int refcount;
+ #ifdef CONFIG_PM_CLK
+       struct list_head clock_list;
+ #endif
+@@ -475,6 +476,8 @@ struct dev_pm_info {
+ };
+ extern void update_pm_runtime_accounting(struct device *dev);
++extern int dev_pm_get_subsys_data(struct device *dev);
++extern int dev_pm_put_subsys_data(struct device *dev);
+ /*
+  * Power domains provide callbacks that are executed during system suspend,
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0053-PM-Domains-Use-power.sybsys_data-to-reduce-overhead.patch b/patches.runtime_pm/0053-PM-Domains-Use-power.sybsys_data-to-reduce-overhead.patch
new file mode 100644 (file)
index 0000000..5304cf9
--- /dev/null
@@ -0,0 +1,519 @@
+From 7f53f19b45ca9ad725dac3799f7b79312e8a1936 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 25 Aug 2011 15:34:12 +0200
+Subject: PM / Domains: Use power.sybsys_data to reduce overhead
+
+Currently pm_genpd_runtime_resume() has to walk the list of devices
+from the device's PM domain to find the corresponding device list
+object containing the need_restore field to check if the driver's
+.runtime_resume() callback should be executed for the device.
+This is suboptimal and can be simplified by using power.sybsys_data
+to store device information used by the generic PM domains code.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 4605ab653c1f9d7cc2dda8033de215c9cee325f4)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+       arch/arm/mach-shmobile/pm_runtime.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ arch/arm/mach-omap1/pm_bus.c            |    1 +
+ arch/arm/mach-shmobile/board-ap4evb.c   |    1 +
+ arch/arm/mach-shmobile/board-mackerel.c |    2 +-
+ arch/arm/mach-shmobile/pm-sh7372.c      |    1 +
+ arch/arm/mach-shmobile/pm_runtime.c     |    1 +
+ drivers/base/power/clock_ops.c          |    2 +-
+ drivers/base/power/common.c             |    3 +-
+ drivers/base/power/domain.c             |   87 +++++++++++--------------------
+ include/linux/pm.h                      |    9 ++++
+ include/linux/pm_clock.h                |   71 +++++++++++++++++++++++++
+ include/linux/pm_domain.h               |    6 ---
+ include/linux/pm_runtime.h              |   46 ----------------
+ 12 files changed, 117 insertions(+), 113 deletions(-)
+ create mode 100644 include/linux/pm_clock.h
+
+diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
+index 943072d..7868e75 100644
+--- a/arch/arm/mach-omap1/pm_bus.c
++++ b/arch/arm/mach-omap1/pm_bus.c
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/io.h>
+ #include <linux/pm_runtime.h>
++#include <linux/pm_clock.h>
+ #include <linux/platform_device.h>
+ #include <linux/mutex.h>
+ #include <linux/clk.h>
+diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
+index 629b0f4..3cbf5bf 100644
+--- a/arch/arm/mach-shmobile/board-ap4evb.c
++++ b/arch/arm/mach-shmobile/board-ap4evb.c
+@@ -42,6 +42,7 @@
+ #include <linux/leds.h>
+ #include <linux/input/sh_keysc.h>
+ #include <linux/usb/r8a66597.h>
++#include <linux/pm_clock.h>
+ #include <media/sh_mobile_ceu.h>
+ #include <media/sh_mobile_csi2.h>
+diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
+index 6d4bc29..70eca5a 100644
+--- a/arch/arm/mach-shmobile/board-mackerel.c
++++ b/arch/arm/mach-shmobile/board-mackerel.c
+@@ -39,7 +39,7 @@
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/physmap.h>
+-#include <linux/pm_runtime.h>
++#include <linux/pm_clock.h>
+ #include <linux/smsc911x.h>
+ #include <linux/sh_intc.h>
+ #include <linux/tca6416_keypad.h>
+diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
+index b516069..3019ce9 100644
+--- a/arch/arm/mach-shmobile/pm-sh7372.c
++++ b/arch/arm/mach-shmobile/pm-sh7372.c
+@@ -15,6 +15,7 @@
+ #include <linux/list.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/pm_clock.h>
+ #include <asm/system.h>
+ #include <asm/io.h>
+ #include <asm/tlbflush.h>
+diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
+index 2bcde1c..27ddf43 100644
+--- a/arch/arm/mach-shmobile/pm_runtime.c
++++ b/arch/arm/mach-shmobile/pm_runtime.c
+@@ -14,6 +14,7 @@
+ #include <linux/kernel.h>
+ #include <linux/io.h>
+ #include <linux/pm_runtime.h>
++#include <linux/pm_clock.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/sh_clk.h>
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index 8383e24..cb44b58 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -10,7 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/io.h>
+ #include <linux/pm.h>
+-#include <linux/pm_runtime.h>
++#include <linux/pm_clock.h>
+ #include <linux/clk.h>
+ #include <linux/slab.h>
+ #include <linux/err.h>
+diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
+index d398cf0..29820c3 100644
+--- a/drivers/base/power/common.c
++++ b/drivers/base/power/common.c
+@@ -10,8 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+-#include <linux/device.h>
+-#include <linux/pm_runtime.h>
++#include <linux/pm_clock.h>
+ /**
+  * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 1fc6cc9..339eb2d 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -181,18 +181,18 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ /**
+  * __pm_genpd_save_device - Save the pre-suspend state of a device.
+- * @dle: Device list entry of the device to save the state of.
++ * @pdd: Domain data of the device to save the state of.
+  * @genpd: PM domain the device belongs to.
+  */
+-static int __pm_genpd_save_device(struct dev_list_entry *dle,
++static int __pm_genpd_save_device(struct pm_domain_data *pdd,
+                                 struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+-      struct device *dev = dle->dev;
++      struct device *dev = pdd->dev;
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+-      if (dle->need_restore)
++      if (pdd->need_restore)
+               return 0;
+       mutex_unlock(&genpd->lock);
+@@ -210,24 +210,24 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
+       mutex_lock(&genpd->lock);
+       if (!ret)
+-              dle->need_restore = true;
++              pdd->need_restore = true;
+       return ret;
+ }
+ /**
+  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
+- * @dle: Device list entry of the device to restore the state of.
++ * @pdd: Domain data of the device to restore the state of.
+  * @genpd: PM domain the device belongs to.
+  */
+-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
++static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
+                                     struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+-      struct device *dev = dle->dev;
++      struct device *dev = pdd->dev;
+       struct device_driver *drv = dev->driver;
+-      if (!dle->need_restore)
++      if (!pdd->need_restore)
+               return;
+       mutex_unlock(&genpd->lock);
+@@ -244,7 +244,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+       mutex_lock(&genpd->lock);
+-      dle->need_restore = false;
++      pdd->need_restore = false;
+ }
+ /**
+@@ -286,7 +286,7 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
+-      struct dev_list_entry *dle;
++      struct pm_domain_data *pdd;
+       struct gpd_link *link;
+       unsigned int not_suspended;
+       int ret = 0;
+@@ -308,8 +308,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+               return -EBUSY;
+       not_suspended = 0;
+-      list_for_each_entry(dle, &genpd->dev_list, node)
+-              if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
++      list_for_each_entry(pdd, &genpd->dev_list, list_node)
++              if (pdd->dev->driver && !pm_runtime_suspended(pdd->dev))
+                       not_suspended++;
+       if (not_suspended > genpd->in_progress)
+@@ -332,9 +332,9 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       genpd->status = GPD_STATE_BUSY;
+       genpd->poweroff_task = current;
+-      list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
++      list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+               ret = atomic_read(&genpd->sd_count) == 0 ?
+-                      __pm_genpd_save_device(dle, genpd) : -EBUSY;
++                      __pm_genpd_save_device(pdd, genpd) : -EBUSY;
+               if (genpd_abort_poweroff(genpd))
+                       goto out;
+@@ -433,24 +433,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+ }
+ /**
+- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+- * @dev: Device to resume.
+- * @genpd: PM domain the device belongs to.
+- */
+-static void __pm_genpd_runtime_resume(struct device *dev,
+-                                    struct generic_pm_domain *genpd)
+-{
+-      struct dev_list_entry *dle;
+-
+-      list_for_each_entry(dle, &genpd->dev_list, node) {
+-              if (dle->dev == dev) {
+-                      __pm_genpd_restore_device(dle, genpd);
+-                      break;
+-              }
+-      }
+-}
+-
+-/**
+  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+  * @dev: Device to resume.
+  *
+@@ -495,7 +477,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
+               mutex_lock(&genpd->lock);
+       }
+       finish_wait(&genpd->status_wait_queue, &wait);
+-      __pm_genpd_runtime_resume(dev, genpd);
++      __pm_genpd_restore_device(&dev->power.subsys_data->domain_data, genpd);
+       genpd->resume_count--;
+       genpd_set_active(genpd);
+       wake_up_all(&genpd->status_wait_queue);
+@@ -525,8 +507,6 @@ void pm_genpd_poweroff_unused(void)
+ #else
+ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+-static inline void __pm_genpd_runtime_resume(struct device *dev,
+-                                           struct generic_pm_domain *genpd) {}
+ #define pm_genpd_runtime_suspend      NULL
+ #define pm_genpd_runtime_resume               NULL
+@@ -1083,7 +1063,7 @@ static void pm_genpd_complete(struct device *dev)
+  */
+ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+ {
+-      struct dev_list_entry *dle;
++      struct pm_domain_data *pdd;
+       int ret = 0;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -1103,26 +1083,20 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+               goto out;
+       }
+-      list_for_each_entry(dle, &genpd->dev_list, node)
+-              if (dle->dev == dev) {
++      list_for_each_entry(pdd, &genpd->dev_list, list_node)
++              if (pdd->dev == dev) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+-      dle = kzalloc(sizeof(*dle), GFP_KERNEL);
+-      if (!dle) {
+-              ret = -ENOMEM;
+-              goto out;
+-      }
+-
+-      dle->dev = dev;
+-      dle->need_restore = false;
+-      list_add_tail(&dle->node, &genpd->dev_list);
+       genpd->device_count++;
+-      spin_lock_irq(&dev->power.lock);
+       dev->pm_domain = &genpd->domain;
+-      spin_unlock_irq(&dev->power.lock);
++      dev_pm_get_subsys_data(dev);
++      pdd = &dev->power.subsys_data->domain_data;
++      pdd->dev = dev;
++      pdd->need_restore = false;
++      list_add_tail(&pdd->list_node, &genpd->dev_list);
+  out:
+       genpd_release_lock(genpd);
+@@ -1138,7 +1112,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+                          struct device *dev)
+ {
+-      struct dev_list_entry *dle;
++      struct pm_domain_data *pdd;
+       int ret = -EINVAL;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -1153,17 +1127,16 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+               goto out;
+       }
+-      list_for_each_entry(dle, &genpd->dev_list, node) {
+-              if (dle->dev != dev)
++      list_for_each_entry(pdd, &genpd->dev_list, list_node) {
++              if (pdd->dev != dev)
+                       continue;
+-              spin_lock_irq(&dev->power.lock);
++              list_del_init(&pdd->list_node);
++              pdd->dev = NULL;
++              dev_pm_put_subsys_data(dev);
+               dev->pm_domain = NULL;
+-              spin_unlock_irq(&dev->power.lock);
+               genpd->device_count--;
+-              list_del(&dle->node);
+-              kfree(dle);
+               ret = 0;
+               break;
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 5b9b36f..b17b6aa 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -423,12 +423,21 @@ enum rpm_request {
+ struct wakeup_source;
++struct pm_domain_data {
++      struct list_head list_node;
++      struct device *dev;
++      bool need_restore;
++};
++
+ struct pm_subsys_data {
+       spinlock_t lock;
+       unsigned int refcount;
+ #ifdef CONFIG_PM_CLK
+       struct list_head clock_list;
+ #endif
++#ifdef CONFIG_PM_GENERIC_DOMAINS
++      struct pm_domain_data domain_data;
++#endif
+ };
+ struct dev_pm_info {
+diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
+new file mode 100644
+index 0000000..8348866
+--- /dev/null
++++ b/include/linux/pm_clock.h
+@@ -0,0 +1,71 @@
++/*
++ * pm_clock.h - Definitions and headers related to device clocks.
++ *
++ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
++ *
++ * This file is released under the GPLv2.
++ */
++
++#ifndef _LINUX_PM_CLOCK_H
++#define _LINUX_PM_CLOCK_H
++
++#include <linux/device.h>
++#include <linux/notifier.h>
++
++struct pm_clk_notifier_block {
++      struct notifier_block nb;
++      struct dev_pm_domain *pm_domain;
++      char *con_ids[];
++};
++
++#ifdef CONFIG_PM_CLK
++static inline bool pm_clk_no_clocks(struct device *dev)
++{
++      return dev && dev->power.subsys_data
++              && list_empty(&dev->power.subsys_data->clock_list);
++}
++
++extern void pm_clk_init(struct device *dev);
++extern int pm_clk_create(struct device *dev);
++extern void pm_clk_destroy(struct device *dev);
++extern int pm_clk_add(struct device *dev, const char *con_id);
++extern void pm_clk_remove(struct device *dev, const char *con_id);
++extern int pm_clk_suspend(struct device *dev);
++extern int pm_clk_resume(struct device *dev);
++#else
++static inline bool pm_clk_no_clocks(struct device *dev)
++{
++      return true;
++}
++static inline void pm_clk_init(struct device *dev)
++{
++}
++static inline int pm_clk_create(struct device *dev)
++{
++      return -EINVAL;
++}
++static inline void pm_clk_destroy(struct device *dev)
++{
++}
++static inline int pm_clk_add(struct device *dev, const char *con_id)
++{
++      return -EINVAL;
++}
++static inline void pm_clk_remove(struct device *dev, const char *con_id)
++{
++}
++#define pm_clk_suspend        NULL
++#define pm_clk_resume NULL
++#endif
++
++#ifdef CONFIG_HAVE_CLK
++extern void pm_clk_add_notifier(struct bus_type *bus,
++                                      struct pm_clk_notifier_block *clknb);
++#else
++static inline void pm_clk_add_notifier(struct bus_type *bus,
++                                      struct pm_clk_notifier_block *clknb)
++{
++}
++#endif
++
++#endif
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index bf679f5..5cce46c 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -61,12 +61,6 @@ struct gpd_link {
+       struct list_head slave_node;
+ };
+-struct dev_list_entry {
+-      struct list_head node;
+-      struct device *dev;
+-      bool need_restore;
+-};
+-
+ #ifdef CONFIG_PM_GENERIC_DOMAINS
+ extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                              struct device *dev);
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 6b90630e..70b2840 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -251,50 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
+       __pm_runtime_use_autosuspend(dev, false);
+ }
+-struct pm_clk_notifier_block {
+-      struct notifier_block nb;
+-      struct dev_pm_domain *pm_domain;
+-      char *con_ids[];
+-};
+-
+-#ifdef CONFIG_PM_CLK
+-extern void pm_clk_init(struct device *dev);
+-extern int pm_clk_create(struct device *dev);
+-extern void pm_clk_destroy(struct device *dev);
+-extern int pm_clk_add(struct device *dev, const char *con_id);
+-extern void pm_clk_remove(struct device *dev, const char *con_id);
+-extern int pm_clk_suspend(struct device *dev);
+-extern int pm_clk_resume(struct device *dev);
+-#else
+-static inline void pm_clk_init(struct device *dev)
+-{
+-}
+-static inline int pm_clk_create(struct device *dev)
+-{
+-      return -EINVAL;
+-}
+-static inline void pm_clk_destroy(struct device *dev)
+-{
+-}
+-static inline int pm_clk_add(struct device *dev, const char *con_id)
+-{
+-      return -EINVAL;
+-}
+-static inline void pm_clk_remove(struct device *dev, const char *con_id)
+-{
+-}
+-#define pm_clk_suspend        NULL
+-#define pm_clk_resume NULL
+-#endif
+-
+-#ifdef CONFIG_HAVE_CLK
+-extern void pm_clk_add_notifier(struct bus_type *bus,
+-                                      struct pm_clk_notifier_block *clknb);
+-#else
+-static inline void pm_clk_add_notifier(struct bus_type *bus,
+-                                      struct pm_clk_notifier_block *clknb)
+-{
+-}
+-#endif
+-
+ #endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0054-PM-QoS-Move-and-rename-the-implementation-files.patch b/patches.runtime_pm/0054-PM-QoS-Move-and-rename-the-implementation-files.patch
new file mode 100644 (file)
index 0000000..f5ab8d7
--- /dev/null
@@ -0,0 +1,326 @@
+From b9626fca3ad6cba770dbb85333e71d92b802f321 Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:03 +0200
+Subject: PM QoS: Move and rename the implementation files
+
+The PM QoS implementation files are better named
+kernel/power/qos.c and include/linux/pm_qos.h.
+
+The PM QoS support is compiled under the CONFIG_PM option.
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Acked-by: markgross <markgross@thegnar.org>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e8db0be1245de16a6cc6365506abc392c3c212d4)
+
+Conflicts:
+
+       kernel/Makefile
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ arch/arm/mach-msm/clock.c                   |    2 +-
+ drivers/acpi/processor_idle.c               |    2 +-
+ drivers/cpuidle/cpuidle.c                   |    2 +-
+ drivers/cpuidle/governors/ladder.c          |    2 +-
+ drivers/cpuidle/governors/menu.c            |    2 +-
+ drivers/media/video/via-camera.c            |    2 +-
+ drivers/net/e1000e/netdev.c                 |    2 +-
+ drivers/net/wireless/ipw2x00/ipw2100.c      |    2 +-
+ include/linux/netdevice.h                   |    2 +-
+ include/linux/{pm_qos_params.h => pm_qos.h} |   31 +++++++++++++++++++++++----
+ include/sound/pcm.h                         |    2 +-
+ kernel/Makefile                             |    2 +-
+ kernel/power/Makefile                       |    2 +-
+ kernel/{pm_qos_params.c => power/qos.c}     |    2 +-
+ net/mac80211/main.c                         |    2 +-
+ net/mac80211/mlme.c                         |    2 +-
+ net/mac80211/scan.c                         |    2 +-
+ sound/core/pcm_native.c                     |    2 +-
+ 18 files changed, 44 insertions(+), 21 deletions(-)
+ rename include/linux/{pm_qos_params.h => pm_qos.h} (53%)
+ rename kernel/{pm_qos_params.c => power/qos.c} (99%)
+
+diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
+index 22a5376..d9145df 100644
+--- a/arch/arm/mach-msm/clock.c
++++ b/arch/arm/mach-msm/clock.c
+@@ -18,7 +18,7 @@
+ #include <linux/list.h>
+ #include <linux/err.h>
+ #include <linux/spinlock.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/mutex.h>
+ #include <linux/clk.h>
+ #include <linux/string.h>
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 431ab11..2e69e09 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -37,7 +37,7 @@
+ #include <linux/dmi.h>
+ #include <linux/moduleparam.h>
+ #include <linux/sched.h>      /* need_resched() */
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/clockchips.h>
+ #include <linux/cpuidle.h>
+ #include <linux/irqflags.h>
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index bf50924..eed4c47 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -12,7 +12,7 @@
+ #include <linux/mutex.h>
+ #include <linux/sched.h>
+ #include <linux/notifier.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/cpu.h>
+ #include <linux/cpuidle.h>
+ #include <linux/ktime.h>
+diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
+index 12c9890..f62fde2 100644
+--- a/drivers/cpuidle/governors/ladder.c
++++ b/drivers/cpuidle/governors/ladder.c
+@@ -14,7 +14,7 @@
+ #include <linux/kernel.h>
+ #include <linux/cpuidle.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/moduleparam.h>
+ #include <linux/jiffies.h>
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index c47f3d0..3600f19 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -12,7 +12,7 @@
+ #include <linux/kernel.h>
+ #include <linux/cpuidle.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/time.h>
+ #include <linux/ktime.h>
+ #include <linux/hrtimer.h>
+diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
+index 85d3048..b3ca389 100644
+--- a/drivers/media/video/via-camera.c
++++ b/drivers/media/video/via-camera.c
+@@ -21,7 +21,7 @@
+ #include <media/videobuf-dma-sg.h>
+ #include <linux/delay.h>
+ #include <linux/dma-mapping.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/via-core.h>
+ #include <linux/via-gpio.h>
+ #include <linux/via_i2c.h>
+diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
+index 5430a9a..4e60214 100644
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -46,7 +46,7 @@
+ #include <linux/if_vlan.h>
+ #include <linux/cpu.h>
+ #include <linux/smp.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/aer.h>
+ #include <linux/prefetch.h>
+diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
+index 4430775..d9df575 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/ipw2x00/ipw2100.c
+@@ -161,7 +161,7 @@ that only one external action is invoked at a time.
+ #include <linux/firmware.h>
+ #include <linux/acpi.h>
+ #include <linux/ctype.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <net/lib80211.h>
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 33b5968..6155893 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -31,7 +31,7 @@
+ #include <linux/if_link.h>
+ #ifdef __KERNEL__
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/timer.h>
+ #include <linux/delay.h>
+ #include <linux/mm.h>
+diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos.h
+similarity index 53%
+rename from include/linux/pm_qos_params.h
+rename to include/linux/pm_qos.h
+index a7d87f9..7ba67541 100644
+--- a/include/linux/pm_qos_params.h
++++ b/include/linux/pm_qos.h
+@@ -1,5 +1,5 @@
+-#ifndef _LINUX_PM_QOS_PARAMS_H
+-#define _LINUX_PM_QOS_PARAMS_H
++#ifndef _LINUX_PM_QOS_H
++#define _LINUX_PM_QOS_H
+ /* interface for the pm_qos_power infrastructure of the linux kernel.
+  *
+  * Mark Gross <mgross@linux.intel.com>
+@@ -25,14 +25,37 @@ struct pm_qos_request_list {
+       int pm_qos_class;
+ };
+-void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
++#ifdef CONFIG_PM
++void pm_qos_add_request(struct pm_qos_request_list *l,
++                      int pm_qos_class, s32 value);
+ void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
+-              s32 new_value);
++                         s32 new_value);
+ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
+ int pm_qos_request(int pm_qos_class);
+ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request_list *req);
++#else
++static inline void pm_qos_add_request(struct pm_qos_request_list *l,
++                                    int pm_qos_class, s32 value)
++                      { return; }
++static inline void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
++                                       s32 new_value)
++                      { return; }
++static inline void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
++                      { return; }
++
++static inline int pm_qos_request(int pm_qos_class)
++                      { return 0; }
++static inline int pm_qos_add_notifier(int pm_qos_class,
++                                    struct notifier_block *notifier)
++                      { return 0; }
++static inline int pm_qos_remove_notifier(int pm_qos_class,
++                                       struct notifier_block *notifier)
++                      { return 0; }
++static inline int pm_qos_request_active(struct pm_qos_request_list *req)
++                      { return 0; }
++#endif
+ #endif
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index e1bad11..1204f17 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -29,7 +29,7 @@
+ #include <linux/poll.h>
+ #include <linux/mm.h>
+ #include <linux/bitops.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #define snd_pcm_substream_chip(substream) ((substream)->private_data)
+ #define snd_pcm_chip(pcm) ((pcm)->private_data)
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 2d64cfc..c4547c7 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -9,7 +9,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
+           rcupdate.o extable.o params.o posix-timers.o \
+           kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+           hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
+-          notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
++          notifier.o ksysfs.o sched_clock.o cred.o \
+           async.o range.o jump_label.o
+ obj-y += groups.o
+diff --git a/kernel/power/Makefile b/kernel/power/Makefile
+index c5ebc6a..ad6bdd8 100644
+--- a/kernel/power/Makefile
++++ b/kernel/power/Makefile
+@@ -1,7 +1,7 @@
+ ccflags-$(CONFIG_PM_DEBUG)    := -DDEBUG
+-obj-$(CONFIG_PM)              += main.o
++obj-$(CONFIG_PM)              += main.o qos.o
+ obj-$(CONFIG_PM_SLEEP)                += console.o
+ obj-$(CONFIG_FREEZER)         += process.o
+ obj-$(CONFIG_SUSPEND)         += suspend.o
+diff --git a/kernel/pm_qos_params.c b/kernel/power/qos.c
+similarity index 99%
+rename from kernel/pm_qos_params.c
+rename to kernel/power/qos.c
+index 6824ca7..3bf69f1 100644
+--- a/kernel/pm_qos_params.c
++++ b/kernel/power/qos.c
+@@ -29,7 +29,7 @@
+ /*#define DEBUG*/
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/sched.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 1e36fb3..e9f776a 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -19,7 +19,7 @@
+ #include <linux/if_arp.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/bitmap.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/inetdevice.h>
+ #include <net/net_namespace.h>
+ #include <net/cfg80211.h>
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 1563250..31e8694 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -17,7 +17,7 @@
+ #include <linux/if_arp.h>
+ #include <linux/etherdevice.h>
+ #include <linux/rtnetlink.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/crc32.h>
+ #include <linux/slab.h>
+ #include <net/mac80211.h>
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 669d2e3..37e6837 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -14,7 +14,7 @@
+ #include <linux/if_arp.h>
+ #include <linux/rtnetlink.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <net/sch_generic.h>
+ #include <linux/slab.h>
+ #include <net/mac80211.h>
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 1c6be91..c74e228 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -23,7 +23,7 @@
+ #include <linux/file.h>
+ #include <linux/slab.h>
+ #include <linux/time.h>
+-#include <linux/pm_qos_params.h>
++#include <linux/pm_qos.h>
+ #include <linux/uio.h>
+ #include <linux/dma-mapping.h>
+ #include <sound/core.h>
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0055-plist-Remove-the-need-to-supply-locks-to-plist-heads.patch b/patches.runtime_pm/0055-plist-Remove-the-need-to-supply-locks-to-plist-heads.patch
new file mode 100644 (file)
index 0000000..24a7397
--- /dev/null
@@ -0,0 +1,265 @@
+From ba0bdfe2987f9ffda40f93706c10cb37b03e2688 Mon Sep 17 00:00:00 2001
+From: Dima Zavin <dima@android.com>
+Date: Thu, 7 Jul 2011 17:27:59 -0700
+Subject: plist: Remove the need to supply locks to plist heads
+
+This was legacy code brought over from the RT tree and
+is no longer necessary.
+
+Signed-off-by: Dima Zavin <dima@android.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Daniel Walker <dwalker@codeaurora.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
+Link: http://lkml.kernel.org/r/1310084879-10351-2-git-send-email-dima@android.com
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+(cherry picked from commit 732375c6a5a4cc825b676c922d547aba96b8ce15)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/plist.h   |   55 +++--------------------------------------------
+ include/linux/rtmutex.h |    4 ++--
+ kernel/fork.c           |    2 +-
+ kernel/futex.c          |    2 +-
+ kernel/power/qos.c      |    6 +++---
+ kernel/rtmutex.c        |    2 +-
+ kernel/sched.c          |    4 ++--
+ lib/plist.c             |    7 +-----
+ 8 files changed, 14 insertions(+), 68 deletions(-)
+
+diff --git a/include/linux/plist.h b/include/linux/plist.h
+index c9b9f32..aa0fb39 100644
+--- a/include/linux/plist.h
++++ b/include/linux/plist.h
+@@ -77,14 +77,9 @@
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+-#include <linux/spinlock_types.h>
+ struct plist_head {
+       struct list_head node_list;
+-#ifdef CONFIG_DEBUG_PI_LIST
+-      raw_spinlock_t *rawlock;
+-      spinlock_t *spinlock;
+-#endif
+ };
+ struct plist_node {
+@@ -93,37 +88,13 @@ struct plist_node {
+       struct list_head        node_list;
+ };
+-#ifdef CONFIG_DEBUG_PI_LIST
+-# define PLIST_HEAD_LOCK_INIT(_lock)          .spinlock = _lock
+-# define PLIST_HEAD_LOCK_INIT_RAW(_lock)      .rawlock = _lock
+-#else
+-# define PLIST_HEAD_LOCK_INIT(_lock)
+-# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
+-#endif
+-
+-#define _PLIST_HEAD_INIT(head)                                \
+-      .node_list = LIST_HEAD_INIT((head).node_list)
+-
+ /**
+  * PLIST_HEAD_INIT - static struct plist_head initializer
+  * @head:     struct plist_head variable name
+- * @_lock:    lock to initialize for this list
+- */
+-#define PLIST_HEAD_INIT(head, _lock)                  \
+-{                                                     \
+-      _PLIST_HEAD_INIT(head),                         \
+-      PLIST_HEAD_LOCK_INIT(&(_lock))                  \
+-}
+-
+-/**
+- * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+- * @head:     struct plist_head variable name
+- * @_lock:    lock to initialize for this list
+  */
+-#define PLIST_HEAD_INIT_RAW(head, _lock)              \
++#define PLIST_HEAD_INIT(head)                         \
+ {                                                     \
+-      _PLIST_HEAD_INIT(head),                         \
+-      PLIST_HEAD_LOCK_INIT_RAW(&(_lock))              \
++      .node_list = LIST_HEAD_INIT((head).node_list)   \
+ }
+ /**
+@@ -141,31 +112,11 @@ struct plist_node {
+ /**
+  * plist_head_init - dynamic struct plist_head initializer
+  * @head:     &struct plist_head pointer
+- * @lock:     spinlock protecting the list (debugging)
+  */
+ static inline void
+-plist_head_init(struct plist_head *head, spinlock_t *lock)
++plist_head_init(struct plist_head *head)
+ {
+       INIT_LIST_HEAD(&head->node_list);
+-#ifdef CONFIG_DEBUG_PI_LIST
+-      head->spinlock = lock;
+-      head->rawlock = NULL;
+-#endif
+-}
+-
+-/**
+- * plist_head_init_raw - dynamic struct plist_head initializer
+- * @head:     &struct plist_head pointer
+- * @lock:     raw_spinlock protecting the list (debugging)
+- */
+-static inline void
+-plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+-{
+-      INIT_LIST_HEAD(&head->node_list);
+-#ifdef CONFIG_DEBUG_PI_LIST
+-      head->rawlock = lock;
+-      head->spinlock = NULL;
+-#endif
+ }
+ /**
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 8d522ff..de17134 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -66,7 +66,7 @@ struct hrtimer_sleeper;
+ #define __RT_MUTEX_INITIALIZER(mutexname) \
+       { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+-      , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
++      , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+       , .owner = NULL \
+       __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+@@ -100,7 +100,7 @@ extern void rt_mutex_unlock(struct rt_mutex *lock);
+ #ifdef CONFIG_RT_MUTEXES
+ # define INIT_RT_MUTEXES(tsk)                                         \
+-      .pi_waiters     = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
++      .pi_waiters     = PLIST_HEAD_INIT(tsk.pi_waiters),      \
+       INIT_RT_MUTEX_DEBUG(tsk)
+ #else
+ # define INIT_RT_MUTEXES(tsk)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index a4e453b..c66f27f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1016,7 +1016,7 @@ static void rt_mutex_init_task(struct task_struct *p)
+ {
+       raw_spin_lock_init(&p->pi_lock);
+ #ifdef CONFIG_RT_MUTEXES
+-      plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
++      plist_head_init(&p->pi_waiters);
+       p->pi_blocked_on = NULL;
+ #endif
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index a2a01ef..6e57b21 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2746,7 +2746,7 @@ static int __init futex_init(void)
+               futex_cmpxchg_enabled = 1;
+       for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+-              plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
++              plist_head_init(&futex_queues[i].chain);
+               spin_lock_init(&futex_queues[i].lock);
+       }
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 3bf69f1..61b4738 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pm_qos_lock);
+ static struct pm_qos_object null_pm_qos;
+ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+ static struct pm_qos_object cpu_dma_pm_qos = {
+-      .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
++      .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
+       .notifiers = &cpu_dma_lat_notifier,
+       .name = "cpu_dma_latency",
+       .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+@@ -84,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
+ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+ static struct pm_qos_object network_lat_pm_qos = {
+-      .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
++      .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
+       .notifiers = &network_lat_notifier,
+       .name = "network_latency",
+       .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+@@ -95,7 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
+ static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+ static struct pm_qos_object network_throughput_pm_qos = {
+-      .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
++      .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
+       .notifiers = &network_throughput_notifier,
+       .name = "network_throughput",
+       .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index ab44911..255e166 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -890,7 +890,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+ {
+       lock->owner = NULL;
+       raw_spin_lock_init(&lock->wait_lock);
+-      plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
++      plist_head_init(&lock->wait_list);
+       debug_rt_mutex_init(lock, name);
+ }
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 063d7a4..75a7b6f 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7924,7 +7924,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
+ #ifdef CONFIG_SMP
+       rt_rq->rt_nr_migratory = 0;
+       rt_rq->overloaded = 0;
+-      plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
++      plist_head_init(&rt_rq->pushable_tasks);
+ #endif
+       rt_rq->rt_time = 0;
+@@ -8129,7 +8129,7 @@ void __init sched_init(void)
+ #endif
+ #ifdef CONFIG_RT_MUTEXES
+-      plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
++      plist_head_init(&init_task.pi_waiters);
+ #endif
+       /*
+diff --git a/lib/plist.c b/lib/plist.c
+index 0ae7e64..a0a4da4 100644
+--- a/lib/plist.c
++++ b/lib/plist.c
+@@ -56,11 +56,6 @@ static void plist_check_list(struct list_head *top)
+ static void plist_check_head(struct plist_head *head)
+ {
+-      WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
+-      if (head->rawlock)
+-              WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+-      if (head->spinlock)
+-              WARN_ON_SMP(!spin_is_locked(head->spinlock));
+       if (!plist_head_empty(head))
+               plist_check_list(&plist_first(head)->prio_list);
+       plist_check_list(&head->node_list);
+@@ -180,7 +175,7 @@ static int  __init plist_test(void)
+       unsigned int r = local_clock();
+       printk(KERN_INFO "start plist test\n");
+-      plist_head_init(&test_head, NULL);
++      plist_head_init(&test_head);
+       for (i = 0; i < ARRAY_SIZE(test_node); i++)
+               plist_node_init(test_node + i, 0);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0056-PM-QoS-Minor-clean-ups.patch b/patches.runtime_pm/0056-PM-QoS-Minor-clean-ups.patch
new file mode 100644 (file)
index 0000000..6944b2a
--- /dev/null
@@ -0,0 +1,409 @@
+From 2bb17e08a33ec95c04c0b4d6ce44bbe78c4afafb Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:12 +0200
+Subject: PM QoS: Minor clean-ups
+
+ - Misc fixes to improve code readability:
+  * rename struct pm_qos_request_list to struct pm_qos_request,
+  * rename pm_qos_req parameter to req in internal code,
+    consistenly use req in the API parameters,
+  * update the in-kernel API callers to the new parameters names,
+  * rename of fields names (requests, list, node, constraints)
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Acked-by: markgross <markgross@thegnar.org>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit cc74998618a66d34651c784dd02412614c3e81cc)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/media/video/via-camera.c       |    2 +-
+ drivers/net/wireless/ipw2x00/ipw2100.c |    2 +-
+ include/linux/netdevice.h              |    2 +-
+ include/linux/pm_qos.h                 |   22 ++++----
+ include/sound/pcm.h                    |    2 +-
+ kernel/power/qos.c                     |   88 ++++++++++++++++----------------
+ 6 files changed, 59 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
+index b3ca389..fba6c64 100644
+--- a/drivers/media/video/via-camera.c
++++ b/drivers/media/video/via-camera.c
+@@ -69,7 +69,7 @@ struct via_camera {
+       struct mutex lock;
+       enum viacam_opstate opstate;
+       unsigned long flags;
+-      struct pm_qos_request_list qos_request;
++      struct pm_qos_request qos_request;
+       /*
+        * GPIO info for power/reset management
+        */
+diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
+index d9df575..f323ec0 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/ipw2x00/ipw2100.c
+@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
+ #define DRV_DESCRIPTION       "Intel(R) PRO/Wireless 2100 Network Driver"
+ #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
+-static struct pm_qos_request_list ipw2100_pm_qos_req;
++static struct pm_qos_request ipw2100_pm_qos_req;
+ /* Debugging stuff */
+ #ifdef CONFIG_IPW2100_DEBUG
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 6155893..2ee4623 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -999,7 +999,7 @@ struct net_device {
+        */
+       char                    name[IFNAMSIZ];
+-      struct pm_qos_request_list pm_qos_req;
++      struct pm_qos_request   pm_qos_req;
+       /* device name hash chain */
+       struct hlist_node       name_hlist;
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 7ba67541..6b0968f 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -20,30 +20,30 @@
+ #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE      (2000 * USEC_PER_SEC)
+ #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE       0
+-struct pm_qos_request_list {
+-      struct plist_node list;
++struct pm_qos_request {
++      struct plist_node node;
+       int pm_qos_class;
+ };
+ #ifdef CONFIG_PM
+-void pm_qos_add_request(struct pm_qos_request_list *l,
+-                      int pm_qos_class, s32 value);
+-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
++void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
++                      s32 value);
++void pm_qos_update_request(struct pm_qos_request *req,
+                          s32 new_value);
+-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
++void pm_qos_remove_request(struct pm_qos_request *req);
+ int pm_qos_request(int pm_qos_class);
+ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+-int pm_qos_request_active(struct pm_qos_request_list *req);
++int pm_qos_request_active(struct pm_qos_request *req);
+ #else
+-static inline void pm_qos_add_request(struct pm_qos_request_list *l,
++static inline void pm_qos_add_request(struct pm_qos_request *req,
+                                     int pm_qos_class, s32 value)
+                       { return; }
+-static inline void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
++static inline void pm_qos_update_request(struct pm_qos_request *req,
+                                        s32 new_value)
+                       { return; }
+-static inline void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
++static inline void pm_qos_remove_request(struct pm_qos_request *req)
+                       { return; }
+ static inline int pm_qos_request(int pm_qos_class)
+@@ -54,7 +54,7 @@ static inline int pm_qos_add_notifier(int pm_qos_class,
+ static inline int pm_qos_remove_notifier(int pm_qos_class,
+                                        struct notifier_block *notifier)
+                       { return 0; }
+-static inline int pm_qos_request_active(struct pm_qos_request_list *req)
++static inline int pm_qos_request_active(struct pm_qos_request *req)
+                       { return 0; }
+ #endif
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index 1204f17..d3b068f 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -373,7 +373,7 @@ struct snd_pcm_substream {
+       int number;
+       char name[32];                  /* substream name */
+       int stream;                     /* stream (direction) */
+-      struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */
++      struct pm_qos_request latency_pm_qos_req; /* pm_qos request */
+       size_t buffer_bytes_max;        /* limit ring buffer size */
+       struct snd_dma_buffer dma_buffer;
+       unsigned int dma_buf_id;
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 61b4738..aa52c44 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -45,7 +45,7 @@
+ #include <linux/uaccess.h>
+ /*
+- * locking rule: all changes to requests or notifiers lists
++ * locking rule: all changes to constraints or notifiers lists
+  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+  * held, taken with _irqsave.  One lock to rule them all
+  */
+@@ -60,7 +60,7 @@ enum pm_qos_type {
+  * types linux supports for 32 bit quantites
+  */
+ struct pm_qos_object {
+-      struct plist_head requests;
++      struct plist_head constraints;
+       struct blocking_notifier_head *notifiers;
+       struct miscdevice pm_qos_power_miscdev;
+       char *name;
+@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pm_qos_lock);
+ static struct pm_qos_object null_pm_qos;
+ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+ static struct pm_qos_object cpu_dma_pm_qos = {
+-      .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
++      .constraints = PLIST_HEAD_INIT(cpu_dma_pm_qos.constraints),
+       .notifiers = &cpu_dma_lat_notifier,
+       .name = "cpu_dma_latency",
+       .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+@@ -84,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
+ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+ static struct pm_qos_object network_lat_pm_qos = {
+-      .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
++      .constraints = PLIST_HEAD_INIT(network_lat_pm_qos.constraints),
+       .notifiers = &network_lat_notifier,
+       .name = "network_latency",
+       .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+@@ -95,7 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
+ static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+ static struct pm_qos_object network_throughput_pm_qos = {
+-      .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
++      .constraints = PLIST_HEAD_INIT(network_throughput_pm_qos.constraints),
+       .notifiers = &network_throughput_notifier,
+       .name = "network_throughput",
+       .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+@@ -129,15 +129,15 @@ static const struct file_operations pm_qos_power_fops = {
+ /* unlocked internal variant */
+ static inline int pm_qos_get_value(struct pm_qos_object *o)
+ {
+-      if (plist_head_empty(&o->requests))
++      if (plist_head_empty(&o->constraints))
+               return o->default_value;
+       switch (o->type) {
+       case PM_QOS_MIN:
+-              return plist_first(&o->requests)->prio;
++              return plist_first(&o->constraints)->prio;
+       case PM_QOS_MAX:
+-              return plist_last(&o->requests)->prio;
++              return plist_last(&o->constraints)->prio;
+       default:
+               /* runtime check for not using enum */
+@@ -170,13 +170,13 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+                * with new value and add, then see if the extremal
+                * changed
+                */
+-              plist_del(node, &o->requests);
++              plist_del(node, &o->constraints);
+               plist_node_init(node, value);
+-              plist_add(node, &o->requests);
++              plist_add(node, &o->constraints);
+       } else if (del) {
+-              plist_del(node, &o->requests);
++              plist_del(node, &o->constraints);
+       } else {
+-              plist_add(node, &o->requests);
++              plist_add(node, &o->constraints);
+       }
+       curr_value = pm_qos_get_value(o);
+       pm_qos_set_value(o, curr_value);
+@@ -222,7 +222,7 @@ int pm_qos_request(int pm_qos_class)
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_request);
+-int pm_qos_request_active(struct pm_qos_request_list *req)
++int pm_qos_request_active(struct pm_qos_request *req)
+ {
+       return req->pm_qos_class != 0;
+ }
+@@ -230,24 +230,24 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
+ /**
+  * pm_qos_add_request - inserts new qos request into the list
+- * @dep: pointer to a preallocated handle
++ * @req: pointer to a preallocated handle
+  * @pm_qos_class: identifies which list of qos request to use
+  * @value: defines the qos request
+  *
+  * This function inserts a new entry in the pm_qos_class list of requested qos
+  * performance characteristics.  It recomputes the aggregate QoS expectations
+- * for the pm_qos_class of parameters and initializes the pm_qos_request_list
++ * for the pm_qos_class of parameters and initializes the pm_qos_request
+  * handle.  Caller needs to save this handle for later use in updates and
+  * removal.
+  */
+-void pm_qos_add_request(struct pm_qos_request_list *dep,
++void pm_qos_add_request(struct pm_qos_request *req,
+                       int pm_qos_class, s32 value)
+ {
+       struct pm_qos_object *o =  pm_qos_array[pm_qos_class];
+       int new_value;
+-      if (pm_qos_request_active(dep)) {
++      if (pm_qos_request_active(req)) {
+               WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
+               return;
+       }
+@@ -255,15 +255,15 @@ void pm_qos_add_request(struct pm_qos_request_list *dep,
+               new_value = o->default_value;
+       else
+               new_value = value;
+-      plist_node_init(&dep->list, new_value);
+-      dep->pm_qos_class = pm_qos_class;
+-      update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE);
++      plist_node_init(&req->node, new_value);
++      req->pm_qos_class = pm_qos_class;
++      update_target(o, &req->node, 0, PM_QOS_DEFAULT_VALUE);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_add_request);
+ /**
+  * pm_qos_update_request - modifies an existing qos request
+- * @pm_qos_req : handle to list element holding a pm_qos request to use
++ * @req : handle to list element holding a pm_qos request to use
+  * @value: defines the qos request
+  *
+  * Updates an existing qos request for the pm_qos_class of parameters along
+@@ -271,56 +271,56 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request);
+  *
+  * Attempts are made to make this code callable on hot code paths.
+  */
+-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
++void pm_qos_update_request(struct pm_qos_request *req,
+                          s32 new_value)
+ {
+       s32 temp;
+       struct pm_qos_object *o;
+-      if (!pm_qos_req) /*guard against callers passing in null */
++      if (!req) /*guard against callers passing in null */
+               return;
+-      if (!pm_qos_request_active(pm_qos_req)) {
++      if (!pm_qos_request_active(req)) {
+               WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
+               return;
+       }
+-      o = pm_qos_array[pm_qos_req->pm_qos_class];
++      o = pm_qos_array[req->pm_qos_class];
+       if (new_value == PM_QOS_DEFAULT_VALUE)
+               temp = o->default_value;
+       else
+               temp = new_value;
+-      if (temp != pm_qos_req->list.prio)
+-              update_target(o, &pm_qos_req->list, 0, temp);
++      if (temp != req->node.prio)
++              update_target(o, &req->node, 0, temp);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_update_request);
+ /**
+  * pm_qos_remove_request - modifies an existing qos request
+- * @pm_qos_req: handle to request list element
++ * @req: handle to request list element
+  *
+- * Will remove pm qos request from the list of requests and
++ * Will remove pm qos request from the list of constraints and
+  * recompute the current target value for the pm_qos_class.  Call this
+  * on slow code paths.
+  */
+-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
++void pm_qos_remove_request(struct pm_qos_request *req)
+ {
+       struct pm_qos_object *o;
+-      if (pm_qos_req == NULL)
++      if (req == NULL)
+               return;
+               /* silent return to keep pcm code cleaner */
+-      if (!pm_qos_request_active(pm_qos_req)) {
++      if (!pm_qos_request_active(req)) {
+               WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+               return;
+       }
+-      o = pm_qos_array[pm_qos_req->pm_qos_class];
+-      update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE);
+-      memset(pm_qos_req, 0, sizeof(*pm_qos_req));
++      o = pm_qos_array[req->pm_qos_class];
++      update_target(o, &req->node, 1, PM_QOS_DEFAULT_VALUE);
++      memset(req, 0, sizeof(*req));
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_remove_request);
+@@ -368,7 +368,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
+       pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
+       if (pm_qos_class >= 0) {
+-               struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
++              struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
+               if (!req)
+                       return -ENOMEM;
+@@ -383,7 +383,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
+ static int pm_qos_power_release(struct inode *inode, struct file *filp)
+ {
+-      struct pm_qos_request_list *req;
++      struct pm_qos_request *req;
+       req = filp->private_data;
+       pm_qos_remove_request(req);
+@@ -399,14 +399,14 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+       s32 value;
+       unsigned long flags;
+       struct pm_qos_object *o;
+-      struct pm_qos_request_list *pm_qos_req = filp->private_data;
++      struct pm_qos_request *req = filp->private_data;
+-      if (!pm_qos_req)
++      if (!req)
+               return -EINVAL;
+-      if (!pm_qos_request_active(pm_qos_req))
++      if (!pm_qos_request_active(req))
+               return -EINVAL;
+-      o = pm_qos_array[pm_qos_req->pm_qos_class];
++      o = pm_qos_array[req->pm_qos_class];
+       spin_lock_irqsave(&pm_qos_lock, flags);
+       value = pm_qos_get_value(o);
+       spin_unlock_irqrestore(&pm_qos_lock, flags);
+@@ -418,7 +418,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+               size_t count, loff_t *f_pos)
+ {
+       s32 value;
+-      struct pm_qos_request_list *pm_qos_req;
++      struct pm_qos_request *req;
+       if (count == sizeof(s32)) {
+               if (copy_from_user(&value, buf, sizeof(s32)))
+@@ -449,8 +449,8 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+               return -EINVAL;
+       }
+-      pm_qos_req = filp->private_data;
+-      pm_qos_update_request(pm_qos_req, value);
++      req = filp->private_data;
++      pm_qos_update_request(req, value);
+       return count;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0057-PM-QoS-Code-reorganization.patch b/patches.runtime_pm/0057-PM-QoS-Code-reorganization.patch
new file mode 100644 (file)
index 0000000..be99e35
--- /dev/null
@@ -0,0 +1,85 @@
+From e578bf69019f6f236f041c77b8aa825b22deb887 Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:20 +0200
+Subject: PM QoS: Code reorganization
+
+Move around the PM QoS misc devices management code
+for better readability.
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Acked-by: markgross <markgross@thegnar.org>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 4a31a33425a1eb92f6a0b9846f081842268361c8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/qos.c |   45 +++++++++++++++++++++++----------------------
+ 1 file changed, 23 insertions(+), 22 deletions(-)
+
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index aa52c44..788c4cf 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -188,28 +188,6 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+                                            NULL);
+ }
+-static int register_pm_qos_misc(struct pm_qos_object *qos)
+-{
+-      qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+-      qos->pm_qos_power_miscdev.name = qos->name;
+-      qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+-
+-      return misc_register(&qos->pm_qos_power_miscdev);
+-}
+-
+-static int find_pm_qos_object_by_minor(int minor)
+-{
+-      int pm_qos_class;
+-
+-      for (pm_qos_class = 0;
+-              pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
+-              if (minor ==
+-                      pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
+-                      return pm_qos_class;
+-      }
+-      return -1;
+-}
+-
+ /**
+  * pm_qos_request - returns current system wide qos expectation
+  * @pm_qos_class: identification of which qos value is requested
+@@ -362,6 +340,29 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
++/* User space interface to PM QoS classes via misc devices */
++static int register_pm_qos_misc(struct pm_qos_object *qos)
++{
++      qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
++      qos->pm_qos_power_miscdev.name = qos->name;
++      qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
++
++      return misc_register(&qos->pm_qos_power_miscdev);
++}
++
++static int find_pm_qos_object_by_minor(int minor)
++{
++      int pm_qos_class;
++
++      for (pm_qos_class = 0;
++              pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
++              if (minor ==
++                      pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
++                      return pm_qos_class;
++      }
++      return -1;
++}
++
+ static int pm_qos_power_open(struct inode *inode, struct file *filp)
+ {
+       long pm_qos_class;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0058-PM-QoS-Reorganize-data-structs.patch b/patches.runtime_pm/0058-PM-QoS-Reorganize-data-structs.patch
new file mode 100644 (file)
index 0000000..3f31646
--- /dev/null
@@ -0,0 +1,243 @@
+From 8fdac1918808653bdd2697dc040bb1153d8b94f3 Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:27 +0200
+Subject: PM QoS: Reorganize data structs
+
+In preparation for the per-device constratins support, re-organize
+the data strctures:
+ - add a struct pm_qos_constraints which contains the constraints
+ related data
+ - update struct pm_qos_object contents to the PM QoS internal object
+ data. Add a pointer to struct pm_qos_constraints
+ - update the internal code to use the new data structs.
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 4e1779baaa542c83b459b0a56585e0c1a04c7782)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_qos.h |   19 +++++++++++
+ kernel/power/qos.c     |   85 +++++++++++++++++++++++-------------------------
+ 2 files changed, 60 insertions(+), 44 deletions(-)
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 6b0968f..9772311 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -25,6 +25,25 @@ struct pm_qos_request {
+       int pm_qos_class;
+ };
++enum pm_qos_type {
++      PM_QOS_UNITIALIZED,
++      PM_QOS_MAX,             /* return the largest value */
++      PM_QOS_MIN              /* return the smallest value */
++};
++
++/*
++ * Note: The lockless read path depends on the CPU accessing
++ * target_value atomically.  Atomic access is only guaranteed on all CPU
++ * types linux supports for 32 bit quantites
++ */
++struct pm_qos_constraints {
++      struct plist_head list;
++      s32 target_value;       /* Do not change to 64 bit */
++      s32 default_value;
++      enum pm_qos_type type;
++      struct blocking_notifier_head *notifiers;
++};
++
+ #ifdef CONFIG_PM
+ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+                       s32 value);
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 788c4cf..4a35fe5 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -49,58 +49,53 @@
+  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+  * held, taken with _irqsave.  One lock to rule them all
+  */
+-enum pm_qos_type {
+-      PM_QOS_MAX,             /* return the largest value */
+-      PM_QOS_MIN              /* return the smallest value */
+-};
+-
+-/*
+- * Note: The lockless read path depends on the CPU accessing
+- * target_value atomically.  Atomic access is only guaranteed on all CPU
+- * types linux supports for 32 bit quantites
+- */
+ struct pm_qos_object {
+-      struct plist_head constraints;
+-      struct blocking_notifier_head *notifiers;
++      struct pm_qos_constraints *constraints;
+       struct miscdevice pm_qos_power_miscdev;
+       char *name;
+-      s32 target_value;       /* Do not change to 64 bit */
+-      s32 default_value;
+-      enum pm_qos_type type;
+ };
+ static DEFINE_SPINLOCK(pm_qos_lock);
+ static struct pm_qos_object null_pm_qos;
++
+ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+-static struct pm_qos_object cpu_dma_pm_qos = {
+-      .constraints = PLIST_HEAD_INIT(cpu_dma_pm_qos.constraints),
+-      .notifiers = &cpu_dma_lat_notifier,
+-      .name = "cpu_dma_latency",
++static struct pm_qos_constraints cpu_dma_constraints = {
++      .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
+       .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+       .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+       .type = PM_QOS_MIN,
++      .notifiers = &cpu_dma_lat_notifier,
++};
++static struct pm_qos_object cpu_dma_pm_qos = {
++      .constraints = &cpu_dma_constraints,
+ };
+ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+-static struct pm_qos_object network_lat_pm_qos = {
+-      .constraints = PLIST_HEAD_INIT(network_lat_pm_qos.constraints),
+-      .notifiers = &network_lat_notifier,
+-      .name = "network_latency",
++static struct pm_qos_constraints network_lat_constraints = {
++      .list = PLIST_HEAD_INIT(network_lat_constraints.list),
+       .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+       .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+-      .type = PM_QOS_MIN
++      .type = PM_QOS_MIN,
++      .notifiers = &network_lat_notifier,
++};
++static struct pm_qos_object network_lat_pm_qos = {
++      .constraints = &network_lat_constraints,
++      .name = "network_latency",
+ };
+ static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+-static struct pm_qos_object network_throughput_pm_qos = {
+-      .constraints = PLIST_HEAD_INIT(network_throughput_pm_qos.constraints),
+-      .notifiers = &network_throughput_notifier,
+-      .name = "network_throughput",
++static struct pm_qos_constraints network_tput_constraints = {
++      .list = PLIST_HEAD_INIT(network_tput_constraints.list),
+       .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+       .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+       .type = PM_QOS_MAX,
++      .notifiers = &network_throughput_notifier,
++};
++static struct pm_qos_object network_throughput_pm_qos = {
++      .constraints = &network_tput_constraints,
++      .name = "network_throughput",
+ };
+@@ -129,15 +124,15 @@ static const struct file_operations pm_qos_power_fops = {
+ /* unlocked internal variant */
+ static inline int pm_qos_get_value(struct pm_qos_object *o)
+ {
+-      if (plist_head_empty(&o->constraints))
+-              return o->default_value;
++      if (plist_head_empty(&o->constraints->list))
++              return o->constraints->default_value;
+-      switch (o->type) {
++      switch (o->constraints->type) {
+       case PM_QOS_MIN:
+-              return plist_first(&o->constraints)->prio;
++              return plist_first(&o->constraints->list)->prio;
+       case PM_QOS_MAX:
+-              return plist_last(&o->constraints)->prio;
++              return plist_last(&o->constraints->list)->prio;
+       default:
+               /* runtime check for not using enum */
+@@ -147,12 +142,12 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
+ static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+ {
+-      return o->target_value;
++      return o->constraints->target_value;
+ }
+ static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+ {
+-      o->target_value = value;
++      o->constraints->target_value = value;
+ }
+ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+@@ -170,20 +165,20 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+                * with new value and add, then see if the extremal
+                * changed
+                */
+-              plist_del(node, &o->constraints);
++              plist_del(node, &o->constraints->list);
+               plist_node_init(node, value);
+-              plist_add(node, &o->constraints);
++              plist_add(node, &o->constraints->list);
+       } else if (del) {
+-              plist_del(node, &o->constraints);
++              plist_del(node, &o->constraints->list);
+       } else {
+-              plist_add(node, &o->constraints);
++              plist_add(node, &o->constraints->list);
+       }
+       curr_value = pm_qos_get_value(o);
+       pm_qos_set_value(o, curr_value);
+       spin_unlock_irqrestore(&pm_qos_lock, flags);
+       if (prev_value != curr_value)
+-              blocking_notifier_call_chain(o->notifiers,
++              blocking_notifier_call_chain(o->constraints->notifiers,
+                                            (unsigned long)curr_value,
+                                            NULL);
+ }
+@@ -230,7 +225,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
+               return;
+       }
+       if (value == PM_QOS_DEFAULT_VALUE)
+-              new_value = o->default_value;
++              new_value = o->constraints->default_value;
+       else
+               new_value = value;
+       plist_node_init(&req->node, new_value);
+@@ -266,7 +261,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
+       o = pm_qos_array[req->pm_qos_class];
+       if (new_value == PM_QOS_DEFAULT_VALUE)
+-              temp = o->default_value;
++              temp = o->constraints->default_value;
+       else
+               temp = new_value;
+@@ -315,7 +310,8 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+       int retval;
+       retval = blocking_notifier_chain_register(
+-                      pm_qos_array[pm_qos_class]->notifiers, notifier);
++                      pm_qos_array[pm_qos_class]->constraints->notifiers,
++                      notifier);
+       return retval;
+ }
+@@ -334,7 +330,8 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+       int retval;
+       retval = blocking_notifier_chain_unregister(
+-                      pm_qos_array[pm_qos_class]->notifiers, notifier);
++                      pm_qos_array[pm_qos_class]->constraints->notifiers,
++                      notifier);
+       return retval;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0059-PM-QoS-Generalize-and-export-constraints-management-.patch b/patches.runtime_pm/0059-PM-QoS-Generalize-and-export-constraints-management-.patch
new file mode 100644 (file)
index 0000000..12d56fb
--- /dev/null
@@ -0,0 +1,292 @@
+From 47215ada657966a1f3484a5cdbb480d4ba44c10e Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:34 +0200
+Subject: PM QoS: Generalize and export constraints management code
+
+In preparation for the per-device constratins support:
+ - rename update_target to pm_qos_update_target
+ - generalize and export pm_qos_update_target for usage by the upcoming
+   per-device latency constraints framework:
+   * operate on struct pm_qos_constraints for constraints management,
+   * introduce an 'action' parameter for constraints add/update/remove,
+   * the return value indicates if the aggregated constraint value has
+     changed,
+ - update the internal code to operate on struct pm_qos_constraints
+ - add a NULL pointer check in the API functions
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit abe98ec2d86279fe821c9051003a0abc43444f15)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_qos.h |   14 ++++++
+ kernel/power/qos.c     |  123 ++++++++++++++++++++++++++----------------------
+ 2 files changed, 81 insertions(+), 56 deletions(-)
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 9772311..84aa150 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -44,7 +44,16 @@ struct pm_qos_constraints {
+       struct blocking_notifier_head *notifiers;
+ };
++/* Action requested to pm_qos_update_target */
++enum pm_qos_req_action {
++      PM_QOS_ADD_REQ,         /* Add a new request */
++      PM_QOS_UPDATE_REQ,      /* Update an existing request */
++      PM_QOS_REMOVE_REQ       /* Remove an existing request */
++};
++
+ #ifdef CONFIG_PM
++int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
++                       enum pm_qos_req_action action, int value);
+ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+                       s32 value);
+ void pm_qos_update_request(struct pm_qos_request *req,
+@@ -56,6 +65,11 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request *req);
+ #else
++static inline int pm_qos_update_target(struct pm_qos_constraints *c,
++                                     struct plist_node *node,
++                                     enum pm_qos_req_action action,
++                                     int value)
++                      { return 0; }
+ static inline void pm_qos_add_request(struct pm_qos_request *req,
+                                     int pm_qos_class, s32 value)
+                       { return; }
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 4a35fe5..7c7cd18 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -122,17 +122,17 @@ static const struct file_operations pm_qos_power_fops = {
+ };
+ /* unlocked internal variant */
+-static inline int pm_qos_get_value(struct pm_qos_object *o)
++static inline int pm_qos_get_value(struct pm_qos_constraints *c)
+ {
+-      if (plist_head_empty(&o->constraints->list))
+-              return o->constraints->default_value;
++      if (plist_head_empty(&c->list))
++              return c->default_value;
+-      switch (o->constraints->type) {
++      switch (c->type) {
+       case PM_QOS_MIN:
+-              return plist_first(&o->constraints->list)->prio;
++              return plist_first(&c->list)->prio;
+       case PM_QOS_MAX:
+-              return plist_last(&o->constraints->list)->prio;
++              return plist_last(&c->list)->prio;
+       default:
+               /* runtime check for not using enum */
+@@ -140,47 +140,73 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
+       }
+ }
+-static inline s32 pm_qos_read_value(struct pm_qos_object *o)
++static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+ {
+-      return o->constraints->target_value;
++      return c->target_value;
+ }
+-static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
++static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
+ {
+-      o->constraints->target_value = value;
++      c->target_value = value;
+ }
+-static void update_target(struct pm_qos_object *o, struct plist_node *node,
+-                        int del, int value)
++/**
++ * pm_qos_update_target - manages the constraints list and calls the notifiers
++ *  if needed
++ * @c: constraints data struct
++ * @node: request to add to the list, to update or to remove
++ * @action: action to take on the constraints list
++ * @value: value of the request to add or update
++ *
++ * This function returns 1 if the aggregated constraint value has changed, 0
++ *  otherwise.
++ */
++int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
++                       enum pm_qos_req_action action, int value)
+ {
+       unsigned long flags;
+-      int prev_value, curr_value;
++      int prev_value, curr_value, new_value;
+       spin_lock_irqsave(&pm_qos_lock, flags);
+-      prev_value = pm_qos_get_value(o);
+-      /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
+-      if (value != PM_QOS_DEFAULT_VALUE) {
++      prev_value = pm_qos_get_value(c);
++      if (value == PM_QOS_DEFAULT_VALUE)
++              new_value = c->default_value;
++      else
++              new_value = value;
++
++      switch (action) {
++      case PM_QOS_REMOVE_REQ:
++              plist_del(node, &c->list);
++              break;
++      case PM_QOS_UPDATE_REQ:
+               /*
+                * to change the list, we atomically remove, reinit
+                * with new value and add, then see if the extremal
+                * changed
+                */
+-              plist_del(node, &o->constraints->list);
+-              plist_node_init(node, value);
+-              plist_add(node, &o->constraints->list);
+-      } else if (del) {
+-              plist_del(node, &o->constraints->list);
+-      } else {
+-              plist_add(node, &o->constraints->list);
++              plist_del(node, &c->list);
++      case PM_QOS_ADD_REQ:
++              plist_node_init(node, new_value);
++              plist_add(node, &c->list);
++              break;
++      default:
++              /* no action */
++              ;
+       }
+-      curr_value = pm_qos_get_value(o);
+-      pm_qos_set_value(o, curr_value);
++
++      curr_value = pm_qos_get_value(c);
++      pm_qos_set_value(c, curr_value);
++
+       spin_unlock_irqrestore(&pm_qos_lock, flags);
+-      if (prev_value != curr_value)
+-              blocking_notifier_call_chain(o->constraints->notifiers,
++      if (prev_value != curr_value) {
++              blocking_notifier_call_chain(c->notifiers,
+                                            (unsigned long)curr_value,
+                                            NULL);
++              return 1;
++      } else {
++              return 0;
++      }
+ }
+ /**
+@@ -191,7 +217,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+  */
+ int pm_qos_request(int pm_qos_class)
+ {
+-      return pm_qos_read_value(pm_qos_array[pm_qos_class]);
++      return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_request);
+@@ -217,20 +243,16 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
+ void pm_qos_add_request(struct pm_qos_request *req,
+                       int pm_qos_class, s32 value)
+ {
+-      struct pm_qos_object *o =  pm_qos_array[pm_qos_class];
+-      int new_value;
++      if (!req) /*guard against callers passing in null */
++              return;
+       if (pm_qos_request_active(req)) {
+               WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
+               return;
+       }
+-      if (value == PM_QOS_DEFAULT_VALUE)
+-              new_value = o->constraints->default_value;
+-      else
+-              new_value = value;
+-      plist_node_init(&req->node, new_value);
+       req->pm_qos_class = pm_qos_class;
+-      update_target(o, &req->node, 0, PM_QOS_DEFAULT_VALUE);
++      pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
++                           &req->node, PM_QOS_ADD_REQ, value);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_add_request);
+@@ -247,9 +269,6 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request);
+ void pm_qos_update_request(struct pm_qos_request *req,
+                          s32 new_value)
+ {
+-      s32 temp;
+-      struct pm_qos_object *o;
+-
+       if (!req) /*guard against callers passing in null */
+               return;
+@@ -258,15 +277,10 @@ void pm_qos_update_request(struct pm_qos_request *req,
+               return;
+       }
+-      o = pm_qos_array[req->pm_qos_class];
+-
+-      if (new_value == PM_QOS_DEFAULT_VALUE)
+-              temp = o->constraints->default_value;
+-      else
+-              temp = new_value;
+-
+-      if (temp != req->node.prio)
+-              update_target(o, &req->node, 0, temp);
++      if (new_value != req->node.prio)
++              pm_qos_update_target(
++                      pm_qos_array[req->pm_qos_class]->constraints,
++                      &req->node, PM_QOS_UPDATE_REQ, new_value);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_update_request);
+@@ -280,9 +294,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_request);
+  */
+ void pm_qos_remove_request(struct pm_qos_request *req)
+ {
+-      struct pm_qos_object *o;
+-
+-      if (req == NULL)
++      if (!req) /*guard against callers passing in null */
+               return;
+               /* silent return to keep pcm code cleaner */
+@@ -291,8 +303,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
+               return;
+       }
+-      o = pm_qos_array[req->pm_qos_class];
+-      update_target(o, &req->node, 1, PM_QOS_DEFAULT_VALUE);
++      pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
++                           &req->node, PM_QOS_REMOVE_REQ,
++                           PM_QOS_DEFAULT_VALUE);
+       memset(req, 0, sizeof(*req));
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_remove_request);
+@@ -396,7 +409,6 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+ {
+       s32 value;
+       unsigned long flags;
+-      struct pm_qos_object *o;
+       struct pm_qos_request *req = filp->private_data;
+       if (!req)
+@@ -404,9 +416,8 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
+       if (!pm_qos_request_active(req))
+               return -EINVAL;
+-      o = pm_qos_array[req->pm_qos_class];
+       spin_lock_irqsave(&pm_qos_lock, flags);
+-      value = pm_qos_get_value(o);
++      value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
+       spin_unlock_irqrestore(&pm_qos_lock, flags);
+       return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0060-PM-QoS-Implement-per-device-PM-QoS-constraints.patch b/patches.runtime_pm/0060-PM-QoS-Implement-per-device-PM-QoS-constraints.patch
new file mode 100644 (file)
index 0000000..1aded4a
--- /dev/null
@@ -0,0 +1,535 @@
+From 0b904050d49167289a67d5612940af6e50b19df1 Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:41 +0200
+Subject: PM QoS: Implement per-device PM QoS constraints
+
+Implement the per-device PM QoS constraints by creating a device
+PM QoS API, which calls the PM QoS constraints management core code.
+
+The per-device latency constraints data strctures are stored
+in the device dev_pm_info struct.
+
+The device PM code calls the init and destroy of the per-device constraints
+data struct in order to support the dynamic insertion and removal of the
+devices in the system.
+
+To minimize the data usage by the per-device constraints, the data struct
+is only allocated at the first call to dev_pm_qos_add_request.
+The data is later free'd when the device is removed from the system.
+A global mutex protects the constraints users from the data being
+allocated and free'd.
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 91ff4cb803df6de9114351b9f2f0f39f397ee03e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/Makefile |    4 +-
+ drivers/base/power/main.c   |    3 +
+ drivers/base/power/qos.c    |  338 +++++++++++++++++++++++++++++++++++++++++++
+ include/linux/pm.h          |    9 ++
+ include/linux/pm_qos.h      |   42 ++++++
+ 5 files changed, 394 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/base/power/qos.c
+
+diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
+index 6488ce1..81676dd 100644
+--- a/drivers/base/power/Makefile
++++ b/drivers/base/power/Makefile
+@@ -1,4 +1,4 @@
+-obj-$(CONFIG_PM)      += sysfs.o generic_ops.o common.o
++obj-$(CONFIG_PM)      += sysfs.o generic_ops.o common.o qos.o
+ obj-$(CONFIG_PM_SLEEP)        += main.o wakeup.o
+ obj-$(CONFIG_PM_RUNTIME)      += runtime.o
+ obj-$(CONFIG_PM_TRACE_RTC)    += trace.o
+@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP)   += opp.o
+ obj-$(CONFIG_PM_GENERIC_DOMAINS)      +=  domain.o
+ obj-$(CONFIG_HAVE_CLK)        += clock_ops.o
+-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+\ No newline at end of file
++ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index a854591..956443f 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -22,6 +22,7 @@
+ #include <linux/mutex.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
++#include <linux/pm_qos.h>
+ #include <linux/resume-trace.h>
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+@@ -97,6 +98,7 @@ void device_pm_add(struct device *dev)
+                       dev_name(dev->parent));
+       list_add_tail(&dev->power.entry, &dpm_list);
+       mutex_unlock(&dpm_list_mtx);
++      dev_pm_qos_constraints_init(dev);
+ }
+ /**
+@@ -107,6 +109,7 @@ void device_pm_remove(struct device *dev)
+ {
+       pr_debug("PM: Removing info for %s:%s\n",
+                dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
++      dev_pm_qos_constraints_destroy(dev);
+       complete_all(&dev->power.completion);
+       mutex_lock(&dpm_list_mtx);
+       list_del_init(&dev->power.entry);
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+new file mode 100644
+index 0000000..cc4c541
+--- /dev/null
++++ b/drivers/base/power/qos.c
+@@ -0,0 +1,338 @@
++/*
++ * Devices PM QoS constraints management
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ *
++ * This module exposes the interface to kernel space for specifying
++ * per-device PM QoS dependencies. It provides infrastructure for registration
++ * of:
++ *
++ * Dependents on a QoS value : register requests
++ * Watchers of QoS value : get notified when target QoS value changes
++ *
++ * This QoS design is best effort based. Dependents register their QoS needs.
++ * Watchers register to keep track of the current QoS needs of the system.
++ *
++ * Note about the per-device constraint data struct allocation:
++ * . The per-device constraints data struct ptr is tored into the device
++ *    dev_pm_info.
++ * . To minimize the data usage by the per-device constraints, the data struct
++ *   is only allocated at the first call to dev_pm_qos_add_request.
++ * . The data is later free'd when the device is removed from the system.
++ * . The constraints_state variable from dev_pm_info tracks the data struct
++ *    allocation state:
++ *    DEV_PM_QOS_NO_DEVICE: No device present or device removed, no data
++ *     allocated,
++ *    DEV_PM_QOS_DEVICE_PRESENT: Device present, data not allocated and will be
++ *     allocated at the first call to dev_pm_qos_add_request,
++ *    DEV_PM_QOS_ALLOCATED: Device present, data allocated. The per-device
++ *     PM QoS constraints framework is operational and constraints can be
++ *     added, updated or removed using the dev_pm_qos_* API.
++ *  . A global mutex protects the constraints users from the data being
++ *     allocated and free'd.
++ */
++
++#include <linux/pm_qos.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/mutex.h>
++
++
++static DEFINE_MUTEX(dev_pm_qos_mtx);
++
++/*
++ * dev_pm_qos_constraints_allocate
++ * @dev: device to allocate data for
++ *
++ * Called at the first call to add_request, for constraint data allocation
++ * Must be called with the dev_pm_qos_mtx mutex held
++ */
++static int dev_pm_qos_constraints_allocate(struct device *dev)
++{
++      struct pm_qos_constraints *c;
++      struct blocking_notifier_head *n;
++
++      c = kzalloc(sizeof(*c), GFP_KERNEL);
++      if (!c)
++              return -ENOMEM;
++
++      n = kzalloc(sizeof(*n), GFP_KERNEL);
++      if (!n) {
++              kfree(c);
++              return -ENOMEM;
++      }
++      BLOCKING_INIT_NOTIFIER_HEAD(n);
++
++      dev->power.constraints = c;
++      plist_head_init(&dev->power.constraints->list);
++      dev->power.constraints->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
++      dev->power.constraints->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
++      dev->power.constraints->type = PM_QOS_MIN;
++      dev->power.constraints->notifiers = n;
++      dev->power.constraints_state = DEV_PM_QOS_ALLOCATED;
++
++      return 0;
++}
++
++/**
++ * dev_pm_qos_constraints_init
++ * @dev: target device
++ *
++ * Called from the device PM subsystem at device insertion
++ */
++void dev_pm_qos_constraints_init(struct device *dev)
++{
++      mutex_lock(&dev_pm_qos_mtx);
++      dev->power.constraints_state = DEV_PM_QOS_DEVICE_PRESENT;
++      mutex_unlock(&dev_pm_qos_mtx);
++}
++
++/**
++ * dev_pm_qos_constraints_destroy
++ * @dev: target device
++ *
++ * Called from the device PM subsystem at device removal
++ */
++void dev_pm_qos_constraints_destroy(struct device *dev)
++{
++      struct dev_pm_qos_request *req, *tmp;
++
++      mutex_lock(&dev_pm_qos_mtx);
++
++      if (dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
++              /* Flush the constraints list for the device */
++              plist_for_each_entry_safe(req, tmp,
++                                        &dev->power.constraints->list,
++                                        node) {
++                      /*
++                       * Update constraints list and call the per-device
++                       * callbacks if needed
++                       */
++                      pm_qos_update_target(req->dev->power.constraints,
++                                         &req->node, PM_QOS_REMOVE_REQ,
++                                         PM_QOS_DEFAULT_VALUE);
++                      memset(req, 0, sizeof(*req));
++              }
++
++              kfree(dev->power.constraints->notifiers);
++              kfree(dev->power.constraints);
++              dev->power.constraints = NULL;
++      }
++      dev->power.constraints_state = DEV_PM_QOS_NO_DEVICE;
++
++      mutex_unlock(&dev_pm_qos_mtx);
++}
++
++/**
++ * dev_pm_qos_add_request - inserts new qos request into the list
++ * @dev: target device for the constraint
++ * @req: pointer to a preallocated handle
++ * @value: defines the qos request
++ *
++ * This function inserts a new entry in the device constraints list of
++ * requested qos performance characteristics. It recomputes the aggregate
++ * QoS expectations of parameters and initializes the dev_pm_qos_request
++ * handle.  Caller needs to save this handle for later use in updates and
++ * removal.
++ *
++ * Returns 1 if the aggregated constraint value has changed,
++ * 0 if the aggregated constraint value has not changed,
++ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
++ * removed from the system
++ */
++int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
++                          s32 value)
++{
++      int ret = 0;
++
++      if (!dev || !req) /*guard against callers passing in null */
++              return -EINVAL;
++
++      if (dev_pm_qos_request_active(req)) {
++              WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
++                      "added request\n");
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev_pm_qos_mtx);
++      req->dev = dev;
++
++      /* Return if the device has been removed */
++      if (req->dev->power.constraints_state == DEV_PM_QOS_NO_DEVICE) {
++              ret = -ENODEV;
++              goto out;
++      }
++
++      /*
++       * Allocate the constraints data on the first call to add_request,
++       * i.e. only if the data is not already allocated and if the device has
++       * not been removed
++       */
++      if (dev->power.constraints_state == DEV_PM_QOS_DEVICE_PRESENT)
++              ret = dev_pm_qos_constraints_allocate(dev);
++
++      if (!ret)
++              ret = pm_qos_update_target(dev->power.constraints, &req->node,
++                                         PM_QOS_ADD_REQ, value);
++
++out:
++      mutex_unlock(&dev_pm_qos_mtx);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
++
++/**
++ * dev_pm_qos_update_request - modifies an existing qos request
++ * @req : handle to list element holding a dev_pm_qos request to use
++ * @new_value: defines the qos request
++ *
++ * Updates an existing dev PM qos request along with updating the
++ * target value.
++ *
++ * Attempts are made to make this code callable on hot code paths.
++ *
++ * Returns 1 if the aggregated constraint value has changed,
++ * 0 if the aggregated constraint value has not changed,
++ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
++ * removed from the system
++ */
++int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
++                            s32 new_value)
++{
++      int ret = 0;
++
++      if (!req) /*guard against callers passing in null */
++              return -EINVAL;
++
++      if (!dev_pm_qos_request_active(req)) {
++              WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
++                      "unknown object\n");
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev_pm_qos_mtx);
++
++      if (req->dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
++              if (new_value != req->node.prio)
++                      ret = pm_qos_update_target(req->dev->power.constraints,
++                                                 &req->node,
++                                                 PM_QOS_UPDATE_REQ,
++                                                 new_value);
++      } else {
++              /* Return if the device has been removed */
++              ret = -ENODEV;
++      }
++
++      mutex_unlock(&dev_pm_qos_mtx);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
++
++/**
++ * dev_pm_qos_remove_request - modifies an existing qos request
++ * @req: handle to request list element
++ *
++ * Will remove pm qos request from the list of constraints and
++ * recompute the current target value. Call this on slow code paths.
++ *
++ * Returns 1 if the aggregated constraint value has changed,
++ * 0 if the aggregated constraint value has not changed,
++ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
++ * removed from the system
++ */
++int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
++{
++      int ret = 0;
++
++      if (!req) /*guard against callers passing in null */
++              return -EINVAL;
++
++      if (!dev_pm_qos_request_active(req)) {
++              WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
++                      "unknown object\n");
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev_pm_qos_mtx);
++
++      if (req->dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
++              ret = pm_qos_update_target(req->dev->power.constraints,
++                                         &req->node, PM_QOS_REMOVE_REQ,
++                                         PM_QOS_DEFAULT_VALUE);
++              memset(req, 0, sizeof(*req));
++      } else {
++              /* Return if the device has been removed */
++              ret = -ENODEV;
++      }
++
++      mutex_unlock(&dev_pm_qos_mtx);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
++
++/**
++ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
++ * of per-device PM QoS constraints
++ *
++ * @dev: target device for the constraint
++ * @notifier: notifier block managed by caller.
++ *
++ * Will register the notifier into a notification chain that gets called
++ * upon changes to the target value for the device.
++ */
++int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
++{
++      int retval = 0;
++
++      mutex_lock(&dev_pm_qos_mtx);
++
++      /* Silently return if the device has been removed */
++      if (dev->power.constraints_state != DEV_PM_QOS_ALLOCATED)
++              goto out;
++
++      retval = blocking_notifier_chain_register(
++                      dev->power.constraints->notifiers,
++                      notifier);
++
++out:
++      mutex_unlock(&dev_pm_qos_mtx);
++      return retval;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
++
++/**
++ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
++ * of per-device PM QoS constraints
++ *
++ * @dev: target device for the constraint
++ * @notifier: notifier block to be removed.
++ *
++ * Will remove the notifier from the notification chain that gets called
++ * upon changes to the target value.
++ */
++int dev_pm_qos_remove_notifier(struct device *dev,
++                             struct notifier_block *notifier)
++{
++      int retval = 0;
++
++      mutex_lock(&dev_pm_qos_mtx);
++
++      /* Silently return if the device has been removed */
++      if (dev->power.constraints_state != DEV_PM_QOS_ALLOCATED)
++              goto out;
++
++      retval = blocking_notifier_chain_unregister(
++                      dev->power.constraints->notifiers,
++                      notifier);
++
++out:
++      mutex_unlock(&dev_pm_qos_mtx);
++      return retval;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index b17b6aa..c0fc859 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -421,6 +421,13 @@ enum rpm_request {
+       RPM_REQ_RESUME,
+ };
++/* Per-device PM QoS constraints data struct state */
++enum dev_pm_qos_state {
++      DEV_PM_QOS_NO_DEVICE,           /* No device present */
++      DEV_PM_QOS_DEVICE_PRESENT,      /* Device present, data not allocated */
++      DEV_PM_QOS_ALLOCATED,           /* Device present, data allocated */
++};
++
+ struct wakeup_source;
+ struct pm_domain_data {
+@@ -482,6 +489,8 @@ struct dev_pm_info {
+       unsigned long           accounting_timestamp;
+ #endif
+       struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
++      struct pm_qos_constraints *constraints;
++      enum dev_pm_qos_state   constraints_state;
+ };
+ extern void update_pm_runtime_accounting(struct device *dev);
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 84aa150..f75f74d 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -19,12 +19,18 @@
+ #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE      (2000 * USEC_PER_SEC)
+ #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE      (2000 * USEC_PER_SEC)
+ #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE       0
++#define PM_QOS_DEV_LAT_DEFAULT_VALUE          0
+ struct pm_qos_request {
+       struct plist_node node;
+       int pm_qos_class;
+ };
++struct dev_pm_qos_request {
++      struct plist_node node;
++      struct device *dev;
++};
++
+ enum pm_qos_type {
+       PM_QOS_UNITIALIZED,
+       PM_QOS_MAX,             /* return the largest value */
+@@ -51,6 +57,11 @@ enum pm_qos_req_action {
+       PM_QOS_REMOVE_REQ       /* Remove an existing request */
+ };
++static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
++{
++      return req->dev != 0;
++}
++
+ #ifdef CONFIG_PM
+ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+                        enum pm_qos_req_action action, int value);
+@@ -64,6 +75,17 @@ int pm_qos_request(int pm_qos_class);
+ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request *req);
++
++int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
++                         s32 value);
++int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
++int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
++int dev_pm_qos_add_notifier(struct device *dev,
++                          struct notifier_block *notifier);
++int dev_pm_qos_remove_notifier(struct device *dev,
++                             struct notifier_block *notifier);
++void dev_pm_qos_constraints_init(struct device *dev);
++void dev_pm_qos_constraints_destroy(struct device *dev);
+ #else
+ static inline int pm_qos_update_target(struct pm_qos_constraints *c,
+                                      struct plist_node *node,
+@@ -89,6 +111,26 @@ static inline int pm_qos_remove_notifier(int pm_qos_class,
+                       { return 0; }
+ static inline int pm_qos_request_active(struct pm_qos_request *req)
+                       { return 0; }
++
++static inline int dev_pm_qos_add_request(struct device *dev,
++                                       struct dev_pm_qos_request *req,
++                                       s32 value)
++                      { return 0; }
++static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
++                                          s32 new_value)
++                      { return 0; }
++static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
++                      { return 0; }
++static inline int dev_pm_qos_add_notifier(struct device *dev,
++                                        struct notifier_block *notifier)
++                      { return 0; }
++static inline int dev_pm_qos_remove_notifier(struct device *dev,
++                                           struct notifier_block *notifier)
++                      { return 0; }
++static inline void dev_pm_qos_constraints_init(struct device *dev)
++                      { return; }
++static inline void dev_pm_qos_constraints_destroy(struct device *dev)
++                      { return; }
+ #endif
+ #endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0061-PM-QoS-Add-global-notification-mechanism-for-device-.patch b/patches.runtime_pm/0061-PM-QoS-Add-global-notification-mechanism-for-device-.patch
new file mode 100644 (file)
index 0000000..73d9b20
--- /dev/null
@@ -0,0 +1,230 @@
+From 19789f2ea81c2739287ed68c195a31a89696e912 Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Thu, 25 Aug 2011 15:35:47 +0200
+Subject: PM QoS: Add global notification mechanism for device constraints
+
+Add a global notification chain that gets called upon changes to the
+aggregated constraint value for any device.
+The notification callbacks are passing the full constraint request data
+in order for the callees to have access to it. The current use is for the
+platform low-level code to access the target device of the constraint.
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit b66213cdb002b08b29603d488c451dfe25e2ca20)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/qos.c |   89 ++++++++++++++++++++++++++++++++++++++--------
+ include/linux/pm_qos.h   |   11 ++++++
+ kernel/power/qos.c       |    2 +-
+ 3 files changed, 87 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index cc4c541..8d0b811 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -17,6 +17,12 @@
+  *
+  * This QoS design is best effort based. Dependents register their QoS needs.
+  * Watchers register to keep track of the current QoS needs of the system.
++ * Watchers can register different types of notification callbacks:
++ *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
++ *    The notification chain data is stored in the per-device constraint
++ *    data struct.
++ *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
++ *    API. The notification chain data is stored in a static variable.
+  *
+  * Note about the per-device constraint data struct allocation:
+  * . The per-device constraints data struct ptr is tored into the device
+@@ -45,6 +51,36 @@
+ static DEFINE_MUTEX(dev_pm_qos_mtx);
++static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
++
++/*
++ * apply_constraint
++ * @req: constraint request to apply
++ * @action: action to perform add/update/remove, of type enum pm_qos_req_action
++ * @value: defines the qos request
++ *
++ * Internal function to update the constraints list using the PM QoS core
++ * code and if needed call the per-device and the global notification
++ * callbacks
++ */
++static int apply_constraint(struct dev_pm_qos_request *req,
++                          enum pm_qos_req_action action, int value)
++{
++      int ret, curr_value;
++
++      ret = pm_qos_update_target(req->dev->power.constraints,
++                                 &req->node, action, value);
++
++      if (ret) {
++              /* Call the global callbacks if needed */
++              curr_value = pm_qos_read_value(req->dev->power.constraints);
++              blocking_notifier_call_chain(&dev_pm_notifiers,
++                                           (unsigned long)curr_value,
++                                           req);
++      }
++
++      return ret;
++}
+ /*
+  * dev_pm_qos_constraints_allocate
+@@ -111,12 +147,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
+                                         &dev->power.constraints->list,
+                                         node) {
+                       /*
+-                       * Update constraints list and call the per-device
++                       * Update constraints list and call the notification
+                        * callbacks if needed
+                        */
+-                      pm_qos_update_target(req->dev->power.constraints,
+-                                         &req->node, PM_QOS_REMOVE_REQ,
+-                                         PM_QOS_DEFAULT_VALUE);
++                      apply_constraint(req, PM_QOS_REMOVE_REQ,
++                                       PM_QOS_DEFAULT_VALUE);
+                       memset(req, 0, sizeof(*req));
+               }
+@@ -147,7 +182,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
+  * removed from the system
+  */
+ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+-                          s32 value)
++                         s32 value)
+ {
+       int ret = 0;
+@@ -178,8 +213,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+               ret = dev_pm_qos_constraints_allocate(dev);
+       if (!ret)
+-              ret = pm_qos_update_target(dev->power.constraints, &req->node,
+-                                         PM_QOS_ADD_REQ, value);
++              ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ out:
+       mutex_unlock(&dev_pm_qos_mtx);
+@@ -220,10 +254,8 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+       if (req->dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
+               if (new_value != req->node.prio)
+-                      ret = pm_qos_update_target(req->dev->power.constraints,
+-                                                 &req->node,
+-                                                 PM_QOS_UPDATE_REQ,
+-                                                 new_value);
++                      ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
++                                             new_value);
+       } else {
+               /* Return if the device has been removed */
+               ret = -ENODEV;
+@@ -262,9 +294,8 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+       mutex_lock(&dev_pm_qos_mtx);
+       if (req->dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
+-              ret = pm_qos_update_target(req->dev->power.constraints,
+-                                         &req->node, PM_QOS_REMOVE_REQ,
+-                                         PM_QOS_DEFAULT_VALUE);
++              ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
++                                     PM_QOS_DEFAULT_VALUE);
+               memset(req, 0, sizeof(*req));
+       } else {
+               /* Return if the device has been removed */
+@@ -336,3 +367,33 @@ out:
+       return retval;
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
++
++/**
++ * dev_pm_qos_add_global_notifier - sets notification entry for changes to
++ * target value of the PM QoS constraints for any device
++ *
++ * @notifier: notifier block managed by caller.
++ *
++ * Will register the notifier into a notification chain that gets called
++ * upon changes to the target value for any device.
++ */
++int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
++{
++      return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
++
++/**
++ * dev_pm_qos_remove_global_notifier - deletes notification for changes to
++ * target value of PM QoS constraints for any device
++ *
++ * @notifier: notifier block to be removed.
++ *
++ * Will remove the notifier from the notification chain that gets called
++ * upon changes to the target value for any device.
++ */
++int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
++{
++      return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index f75f74d..ca7bd3f 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -75,6 +75,7 @@ int pm_qos_request(int pm_qos_class);
+ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request *req);
++s32 pm_qos_read_value(struct pm_qos_constraints *c);
+ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+                          s32 value);
+@@ -84,6 +85,8 @@ int dev_pm_qos_add_notifier(struct device *dev,
+                           struct notifier_block *notifier);
+ int dev_pm_qos_remove_notifier(struct device *dev,
+                              struct notifier_block *notifier);
++int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
++int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
+ void dev_pm_qos_constraints_init(struct device *dev);
+ void dev_pm_qos_constraints_destroy(struct device *dev);
+ #else
+@@ -111,6 +114,8 @@ static inline int pm_qos_remove_notifier(int pm_qos_class,
+                       { return 0; }
+ static inline int pm_qos_request_active(struct pm_qos_request *req)
+                       { return 0; }
++static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
++                      { return 0; }
+ static inline int dev_pm_qos_add_request(struct device *dev,
+                                        struct dev_pm_qos_request *req,
+@@ -127,6 +132,12 @@ static inline int dev_pm_qos_add_notifier(struct device *dev,
+ static inline int dev_pm_qos_remove_notifier(struct device *dev,
+                                            struct notifier_block *notifier)
+                       { return 0; }
++static inline int dev_pm_qos_add_global_notifier(
++                                      struct notifier_block *notifier)
++                      { return 0; }
++static inline int dev_pm_qos_remove_global_notifier(
++                                      struct notifier_block *notifier)
++                      { return 0; }
+ static inline void dev_pm_qos_constraints_init(struct device *dev)
+                       { return; }
+ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 7c7cd18..1c1797d 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -140,7 +140,7 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
+       }
+ }
+-static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
++s32 pm_qos_read_value(struct pm_qos_constraints *c)
+ {
+       return c->target_value;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0062-PM-Domains-Preliminary-support-for-devices-with-powe.patch b/patches.runtime_pm/0062-PM-Domains-Preliminary-support-for-devices-with-powe.patch
new file mode 100644 (file)
index 0000000..ffa6493
--- /dev/null
@@ -0,0 +1,110 @@
+From f5cf18d0d692aaf9fc087e491a08f8eb8be35d66 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 25 Aug 2011 15:37:04 +0200
+Subject: PM / Domains: Preliminary support for devices with power.irq_safe
+ set
+
+The generic PM domains framework currently doesn't work with devices
+whose power.irq_safe flag is set, because runtime PM callbacks for
+such devices are run with interrupts disabled and the callbacks
+provided by the generic PM domains framework use domain mutexes
+and may sleep.  However, such devices very well may belong to
+power domains on some systems, so the generic PM domains framework
+should take them into account.
+
+For this reason, modify the generic PM domains framework so that the
+domain .power_off() and .power_on() callbacks are never executed for
+a domain containing devices with power.irq_safe set, although the
+.stop_device() and .start_device() callbacks are still run for them.
+
+Additionally, introduce a flag allowing the creator of a
+struct generic_pm_domain object to indicate that its .stop_device()
+and .start_device() callbacks may be run in interrupt context
+(might_sleep_if() triggers if that flag is not set and one of those
+callbacks is run in interrupt context).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 0aa2a221696cc8ea20a4cdca01315d3b6b4ecc4d)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   19 ++++++++++++++++++-
+ include/linux/pm_domain.h   |    1 +
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 339eb2d..c2468a7 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -309,7 +309,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       not_suspended = 0;
+       list_for_each_entry(pdd, &genpd->dev_list, list_node)
+-              if (pdd->dev->driver && !pm_runtime_suspended(pdd->dev))
++              if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
++                  || pdd->dev->power.irq_safe))
+                       not_suspended++;
+       if (not_suspended > genpd->in_progress)
+@@ -417,12 +418,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
++      might_sleep_if(!genpd->dev_irq_safe);
++
+       if (genpd->stop_device) {
+               int ret = genpd->stop_device(dev);
+               if (ret)
+                       return ret;
+       }
++      /*
++       * If power.irq_safe is set, this routine will be run with interrupts
++       * off, so it can't use mutexes.
++       */
++      if (dev->power.irq_safe)
++              return 0;
++
+       mutex_lock(&genpd->lock);
+       genpd->in_progress++;
+       pm_genpd_poweroff(genpd);
+@@ -452,6 +462,12 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
++      might_sleep_if(!genpd->dev_irq_safe);
++
++      /* If power.irq_safe, the PM domain is never powered off. */
++      if (dev->power.irq_safe)
++              goto out;
++
+       mutex_lock(&genpd->lock);
+       ret = __pm_genpd_poweron(genpd);
+       if (ret) {
+@@ -483,6 +499,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       wake_up_all(&genpd->status_wait_queue);
+       mutex_unlock(&genpd->lock);
++ out:
+       if (genpd->start_device)
+               genpd->start_device(dev);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 5cce46c..2538d90 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -42,6 +42,7 @@ struct generic_pm_domain {
+       unsigned int suspended_count;   /* System suspend device counter */
+       unsigned int prepared_count;    /* Suspend counter of prepared devices */
+       bool suspend_power_off; /* Power status before system suspend */
++      bool dev_irq_safe;      /* Device callbacks are IRQ-safe */
+       int (*power_off)(struct generic_pm_domain *domain);
+       int (*power_on)(struct generic_pm_domain *domain);
+       int (*start_device)(struct device *dev);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0063-PM-Runtime-pm_runtime_idle-can-be-called-in-atomic-c.patch b/patches.runtime_pm/0063-PM-Runtime-pm_runtime_idle-can-be-called-in-atomic-c.patch
new file mode 100644 (file)
index 0000000..14e2e5d
--- /dev/null
@@ -0,0 +1,33 @@
+From 0204fb7b94314c8d49cec3a707aedfcca93b7386 Mon Sep 17 00:00:00 2001
+From: Ming Lei <tom.leiming@gmail.com>
+Date: Wed, 21 Sep 2011 22:31:33 +0200
+Subject: PM / Runtime: pm_runtime_idle() can be called in atomic context
+
+Add to pm_runtime_idle the list of functions that can be called
+in atomic context if pm_runtime_irq_safe() has been called for the
+device.
+
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 2e6ba515f50ef7ddf35b2703d014d3216c9b8b24)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |    1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 08d70e4..1f05404 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -477,6 +477,7 @@ pm_runtime_autosuspend_expiration()
+ If pm_runtime_irq_safe() has been called for a device then the following helper
+ functions may also be used in interrupt context:
++pm_runtime_idle()
+ pm_runtime_suspend()
+ pm_runtime_autosuspend()
+ pm_runtime_resume()
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0064-cpu_pm-Add-cpu-power-management-notifiers.patch b/patches.runtime_pm/0064-cpu_pm-Add-cpu-power-management-notifiers.patch
new file mode 100644 (file)
index 0000000..767d829
--- /dev/null
@@ -0,0 +1,382 @@
+From b34e87d056193445b747806d64095928cd10af57 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Thu, 10 Feb 2011 02:04:45 -0800
+Subject: cpu_pm: Add cpu power management notifiers
+
+During some CPU power modes entered during idle, hotplug and
+suspend, peripherals located in the CPU power domain, such as
+the GIC, localtimers, and VFP, may be powered down.  Add a
+notifier chain that allows drivers for those peripherals to
+be notified before and after they may be reset.
+
+Notified drivers can include VFP co-processor, interrupt controller
+and it's PM extensions, local CPU timers context save/restore which
+shouldn't be interrupted. Hence CPU PM event APIs  must be called
+with interrupts disabled.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Tested-and-Acked-by: Shawn Guo <shawn.guo@linaro.org>
+Tested-by: Kevin Hilman <khilman@ti.com>
+Tested-by: Vishwanath BS <vishwanath.bs@ti.com>
+(cherry picked from commit ab10023e0088d5075354afc7cb9e72304757dddd)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/cpu_pm.h |  109 ++++++++++++++++++++++++++
+ kernel/Makefile        |    1 +
+ kernel/cpu_pm.c        |  200 ++++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/power/Kconfig   |    4 +
+ 4 files changed, 314 insertions(+)
+ create mode 100644 include/linux/cpu_pm.h
+ create mode 100644 kernel/cpu_pm.c
+
+diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
+new file mode 100644
+index 0000000..455b233
+--- /dev/null
++++ b/include/linux/cpu_pm.h
+@@ -0,0 +1,109 @@
++/*
++ * Copyright (C) 2011 Google, Inc.
++ *
++ * Author:
++ *    Colin Cross <ccross@android.com>
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef _LINUX_CPU_PM_H
++#define _LINUX_CPU_PM_H
++
++#include <linux/kernel.h>
++#include <linux/notifier.h>
++
++/*
++ * When a CPU goes to a low power state that turns off power to the CPU's
++ * power domain, the contents of some blocks (floating point coprocessors,
++ * interrupt controllers, caches, timers) in the same power domain can
++ * be lost.  The cpm_pm notifiers provide a method for platform idle, suspend,
++ * and hotplug implementations to notify the drivers for these blocks that
++ * they may be reset.
++ *
++ * All cpu_pm notifications must be called with interrupts disabled.
++ *
++ * The notifications are split into two classes: CPU notifications and CPU
++ * cluster notifications.
++ *
++ * CPU notifications apply to a single CPU and must be called on the affected
++ * CPU.  They are used to save per-cpu context for affected blocks.
++ *
++ * CPU cluster notifications apply to all CPUs in a single power domain. They
++ * are used to save any global context for affected blocks, and must be called
++ * after all the CPUs in the power domain have been notified of the low power
++ * state.
++ */
++
++/*
++ * Event codes passed as unsigned long val to notifier calls
++ */
++enum cpu_pm_event {
++      /* A single cpu is entering a low power state */
++      CPU_PM_ENTER,
++
++      /* A single cpu failed to enter a low power state */
++      CPU_PM_ENTER_FAILED,
++
++      /* A single cpu is exiting a low power state */
++      CPU_PM_EXIT,
++
++      /* A cpu power domain is entering a low power state */
++      CPU_CLUSTER_PM_ENTER,
++
++      /* A cpu power domain failed to enter a low power state */
++      CPU_CLUSTER_PM_ENTER_FAILED,
++
++      /* A cpu power domain is exiting a low power state */
++      CPU_CLUSTER_PM_EXIT,
++};
++
++#ifdef CONFIG_CPU_PM
++int cpu_pm_register_notifier(struct notifier_block *nb);
++int cpu_pm_unregister_notifier(struct notifier_block *nb);
++int cpu_pm_enter(void);
++int cpu_pm_exit(void);
++int cpu_cluster_pm_enter(void);
++int cpu_cluster_pm_exit(void);
++
++#else
++
++static inline int cpu_pm_register_notifier(struct notifier_block *nb)
++{
++      return 0;
++}
++
++static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
++{
++      return 0;
++}
++
++static inline int cpu_pm_enter(void)
++{
++      return 0;
++}
++
++static inline int cpu_pm_exit(void)
++{
++      return 0;
++}
++
++static inline int cpu_cluster_pm_enter(void)
++{
++      return 0;
++}
++
++static inline int cpu_cluster_pm_exit(void)
++{
++      return 0;
++}
++#endif
++#endif
+diff --git a/kernel/Makefile b/kernel/Makefile
+index c4547c7..de5198f 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -101,6 +101,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
+ obj-$(CONFIG_TRACEPOINTS) += trace/
+ obj-$(CONFIG_SMP) += sched_cpupri.o
+ obj-$(CONFIG_IRQ_WORK) += irq_work.o
++obj-$(CONFIG_CPU_PM) += cpu_pm.o
+ obj-$(CONFIG_PERF_EVENTS) += events/
+diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
+new file mode 100644
+index 0000000..4d1ff4a
+--- /dev/null
++++ b/kernel/cpu_pm.c
+@@ -0,0 +1,200 @@
++/*
++ * Copyright (C) 2011 Google, Inc.
++ *
++ * Author:
++ *    Colin Cross <ccross@android.com>
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/cpu_pm.h>
++#include <linux/module.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++
++static DEFINE_RWLOCK(cpu_pm_notifier_lock);
++static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
++
++static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
++{
++      int ret;
++
++      ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
++              nr_to_call, nr_calls);
++
++      return notifier_to_errno(ret);
++}
++
++/**
++ * cpu_pm_register_notifier - register a driver with cpu_pm
++ * @nb: notifier block to register
++ *
++ * Add a driver to a list of drivers that are notified about
++ * CPU and CPU cluster low power entry and exit.
++ *
++ * This function may sleep, and has the same return conditions as
++ * raw_notifier_chain_register.
++ */
++int cpu_pm_register_notifier(struct notifier_block *nb)
++{
++      unsigned long flags;
++      int ret;
++
++      write_lock_irqsave(&cpu_pm_notifier_lock, flags);
++      ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
++      write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
++
++/**
++ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
++ * @nb: notifier block to be unregistered
++ *
++ * Remove a driver from the CPU PM notifier list.
++ *
++ * This function may sleep, and has the same return conditions as
++ * raw_notifier_chain_unregister.
++ */
++int cpu_pm_unregister_notifier(struct notifier_block *nb)
++{
++      unsigned long flags;
++      int ret;
++
++      write_lock_irqsave(&cpu_pm_notifier_lock, flags);
++      ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
++      write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
++
++/**
++ * cpm_pm_enter - CPU low power entry notifier
++ *
++ * Notifies listeners that a single CPU is entering a low power state that may
++ * cause some blocks in the same power domain as the cpu to reset.
++ *
++ * Must be called on the affected CPU with interrupts disabled.  Platform is
++ * responsible for ensuring that cpu_pm_enter is not called twice on the same
++ * CPU before cpu_pm_exit is called. Notified drivers can include VFP
++ * co-processor, interrupt controller and it's PM extensions, local CPU
++ * timers context save/restore which shouldn't be interrupted. Hence it
++ * must be called with interrupts disabled.
++ *
++ * Return conditions are same as __raw_notifier_call_chain.
++ */
++int cpu_pm_enter(void)
++{
++      int nr_calls;
++      int ret = 0;
++
++      read_lock(&cpu_pm_notifier_lock);
++      ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
++      if (ret)
++              /*
++               * Inform listeners (nr_calls - 1) about failure of CPU PM
++               * PM entry who are notified earlier to prepare for it.
++               */
++              cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
++      read_unlock(&cpu_pm_notifier_lock);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cpu_pm_enter);
++
++/**
++ * cpm_pm_exit - CPU low power exit notifier
++ *
++ * Notifies listeners that a single CPU is exiting a low power state that may
++ * have caused some blocks in the same power domain as the cpu to reset.
++ *
++ * Notified drivers can include VFP co-processor, interrupt controller
++ * and it's PM extensions, local CPU timers context save/restore which
++ * shouldn't be interrupted. Hence it must be called with interrupts disabled.
++ *
++ * Return conditions are same as __raw_notifier_call_chain.
++ */
++int cpu_pm_exit(void)
++{
++      int ret;
++
++      read_lock(&cpu_pm_notifier_lock);
++      ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
++      read_unlock(&cpu_pm_notifier_lock);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cpu_pm_exit);
++
++/**
++ * cpm_cluster_pm_enter - CPU cluster low power entry notifier
++ *
++ * Notifies listeners that all cpus in a power domain are entering a low power
++ * state that may cause some blocks in the same power domain to reset.
++ *
++ * Must be called after cpu_pm_enter has been called on all cpus in the power
++ * domain, and before cpu_pm_exit has been called on any cpu in the power
++ * domain. Notified drivers can include VFP co-processor, interrupt controller
++ * and it's PM extensions, local CPU timers context save/restore which
++ * shouldn't be interrupted. Hence it must be called with interrupts disabled.
++ *
++ * Must be called with interrupts disabled.
++ *
++ * Return conditions are same as __raw_notifier_call_chain.
++ */
++int cpu_cluster_pm_enter(void)
++{
++      int nr_calls;
++      int ret = 0;
++
++      read_lock(&cpu_pm_notifier_lock);
++      ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
++      if (ret)
++              /*
++               * Inform listeners (nr_calls - 1) about failure of CPU cluster
++               * PM entry who are notified earlier to prepare for it.
++               */
++              cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
++      read_unlock(&cpu_pm_notifier_lock);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
++
++/**
++ * cpm_cluster_pm_exit - CPU cluster low power exit notifier
++ *
++ * Notifies listeners that all cpus in a power domain are exiting form a
++ * low power state that may have caused some blocks in the same power domain
++ * to reset.
++ *
++ * Must be called after cpu_pm_exit has been called on all cpus in the power
++ * domain, and before cpu_pm_exit has been called on any cpu in the power
++ * domain. Notified drivers can include VFP co-processor, interrupt controller
++ * and it's PM extensions, local CPU timers context save/restore which
++ * shouldn't be interrupted. Hence it must be called with interrupts disabled.
++ *
++ * Return conditions are same as __raw_notifier_call_chain.
++ */
++int cpu_cluster_pm_exit(void)
++{
++      int ret;
++
++      read_lock(&cpu_pm_notifier_lock);
++      ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
++      read_unlock(&cpu_pm_notifier_lock);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index 3744c59..80a8597 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -235,3 +235,7 @@ config PM_GENERIC_DOMAINS
+ config PM_GENERIC_DOMAINS_RUNTIME
+       def_bool y
+       depends on PM_RUNTIME && PM_GENERIC_DOMAINS
++
++config CPU_PM
++      bool
++      depends on SUSPEND || CPU_IDLE
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0065-PM-Clocks-Do-not-acquire-a-mutex-under-a-spinlock.patch b/patches.runtime_pm/0065-PM-Clocks-Do-not-acquire-a-mutex-under-a-spinlock.patch
new file mode 100644 (file)
index 0000000..58de9b0
--- /dev/null
@@ -0,0 +1,194 @@
+From f7518aaf30e679acdf93726d54504bf127135fa9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 26 Sep 2011 19:40:23 +0200
+Subject: PM / Clocks: Do not acquire a mutex under a spinlock
+
+Commit b7ab83e (PM: Use spinlock instead of mutex in clock
+management functions) introduced a regression causing clocks_mutex
+to be acquired under a spinlock.  This happens because
+pm_clk_suspend() and pm_clk_resume() call pm_clk_acquire() under
+pcd->lock, but pm_clk_acquire() executes clk_get() which causes
+clocks_mutex to be acquired.  Similarly, __pm_clk_remove(),
+executed under pcd->lock, calls clk_put(), which also causes
+clocks_mutex to be acquired.
+
+To fix those problems make pm_clk_add() call pm_clk_acquire(), so
+that pm_clk_suspend() and pm_clk_resume() don't have to do that.
+Change pm_clk_remove() and pm_clk_destroy() to separate
+modifications of the pcd->clock_list list from the actual removal of
+PM clock entry objects done by __pm_clk_remove().
+
+Reported-and-tested-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
+(cherry picked from commit e8b364b88cc4001b21c28c1ecf1e1e3ffbe162e6)
+
+Conflicts:
+
+       drivers/base/power/clock_ops.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/clock_ops.c |   76 +++++++++++++++++++++-------------------
+ 1 file changed, 39 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index cb44b58..b876e60 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -32,6 +32,22 @@ struct pm_clock_entry {
+ };
+ /**
++ * pm_clk_acquire - Acquire a device clock.
++ * @dev: Device whose clock is to be acquired.
++ * @ce: PM clock entry corresponding to the clock.
++ */
++static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
++{
++      ce->clk = clk_get(dev, ce->con_id);
++      if (IS_ERR(ce->clk)) {
++              ce->status = PCE_STATUS_ERROR;
++      } else {
++              ce->status = PCE_STATUS_ACQUIRED;
++              dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
++      }
++}
++
++/**
+  * pm_clk_add - Start using a device clock for power management.
+  * @dev: Device whose clock is going to be used for power management.
+  * @con_id: Connection ID of the clock.
+@@ -63,6 +79,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
+               }
+       }
++      pm_clk_acquire(dev, ce);
++
+       spin_lock_irq(&psd->lock);
+       list_add_tail(&ce->node, &psd->clock_list);
+       spin_unlock_irq(&psd->lock);
+@@ -72,17 +90,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
+ /**
+  * __pm_clk_remove - Destroy PM clock entry.
+  * @ce: PM clock entry to destroy.
+- *
+- * This routine must be called under the spinlock protecting the PM list of
+- * clocks corresponding the the @ce's device.
+  */
+ static void __pm_clk_remove(struct pm_clock_entry *ce)
+ {
+       if (!ce)
+               return;
+-      list_del(&ce->node);
+-
+       if (ce->status < PCE_STATUS_ERROR) {
+               if (ce->status == PCE_STATUS_ENABLED)
+                       clk_disable(ce->clk);
+@@ -116,18 +129,22 @@ void pm_clk_remove(struct device *dev, const char *con_id)
+       spin_lock_irq(&psd->lock);
+       list_for_each_entry(ce, &psd->clock_list, node) {
+-              if (!con_id && !ce->con_id) {
+-                      __pm_clk_remove(ce);
+-                      break;
+-              } else if (!con_id || !ce->con_id) {
++              if (!con_id && !ce->con_id)
++                      goto remove;
++              else if (!con_id || !ce->con_id)
+                       continue;
+-              } else if (!strcmp(con_id, ce->con_id)) {
+-                      __pm_clk_remove(ce);
+-                      break;
+-              }
++              else if (!strcmp(con_id, ce->con_id))
++                      goto remove;
+       }
+       spin_unlock_irq(&psd->lock);
++      return;
++
++ remove:
++      list_del(&ce->node);
++      spin_unlock_irq(&psd->lock);
++
++      __pm_clk_remove(ce);
+ }
+ /**
+@@ -169,18 +186,26 @@ void pm_clk_destroy(struct device *dev)
+ {
+       struct pm_subsys_data *psd = dev_to_psd(dev);
+       struct pm_clock_entry *ce, *c;
++      struct list_head list;
+       if (!psd)
+               return;
++      INIT_LIST_HEAD(&list);
++
+       spin_lock_irq(&psd->lock);
+       list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+-              __pm_clk_remove(ce);
++              list_move(&ce->node, &list);
+       spin_unlock_irq(&psd->lock);
+       dev_pm_put_subsys_data(dev);
++
++      list_for_each_entry_safe_reverse(ce, c, &list, node) {
++              list_del(&ce->node);
++              __pm_clk_remove(ce);
++      }
+ }
+ #endif /* CONFIG_PM */
+@@ -188,23 +213,6 @@ void pm_clk_destroy(struct device *dev)
+ #ifdef CONFIG_PM_RUNTIME
+ /**
+- * pm_clk_acquire - Acquire a device clock.
+- * @dev: Device whose clock is to be acquired.
+- * @con_id: Connection ID of the clock.
+- */
+-static void pm_clk_acquire(struct device *dev,
+-                                  struct pm_clock_entry *ce)
+-{
+-      ce->clk = clk_get(dev, ce->con_id);
+-      if (IS_ERR(ce->clk)) {
+-              ce->status = PCE_STATUS_ERROR;
+-      } else {
+-              ce->status = PCE_STATUS_ACQUIRED;
+-              dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+-      }
+-}
+-
+-/**
+  * pm_clk_suspend - Disable clocks in a device's PM clock list.
+  * @dev: Device to disable the clocks for.
+  */
+@@ -222,9 +230,6 @@ int pm_clk_suspend(struct device *dev)
+       spin_lock_irqsave(&psd->lock, flags);
+       list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+-              if (ce->status == PCE_STATUS_NONE)
+-                      pm_clk_acquire(dev, ce);
+-
+               if (ce->status < PCE_STATUS_ERROR) {
+                       clk_disable(ce->clk);
+                       ce->status = PCE_STATUS_ACQUIRED;
+@@ -254,9 +259,6 @@ int pm_clk_resume(struct device *dev)
+       spin_lock_irqsave(&psd->lock, flags);
+       list_for_each_entry(ce, &psd->clock_list, node) {
+-              if (ce->status == PCE_STATUS_NONE)
+-                      pm_clk_acquire(dev, ce);
+-
+               if (ce->status < PCE_STATUS_ERROR) {
+                       clk_enable(ce->clk);
+                       ce->status = PCE_STATUS_ENABLED;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0066-PM-Domains-Split-device-PM-domain-data-into-base-and.patch b/patches.runtime_pm/0066-PM-Domains-Split-device-PM-domain-data-into-base-and.patch
new file mode 100644 (file)
index 0000000..0640cc2
--- /dev/null
@@ -0,0 +1,173 @@
+From e62d564194eff6032d2447d26059ee9e3ebab253 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 26 Sep 2011 20:22:02 +0200
+Subject: PM / Domains: Split device PM domain data into base and need_restore
+
+The struct pm_domain_data data type is defined in such a way that
+adding new fields specific to the generic PM domains code will
+require include/linux/pm.h to be modified.  As a result, data types
+used only by the generic PM domains code will be defined in two
+headers, although they all should be defined in pm_domain.h and
+pm.h will need to include more headers, which won't be very nice.
+
+For this reason change the definition of struct pm_subsys_data
+so that its domain_data member is a pointer, which will allow
+struct pm_domain_data to be subclassed by various PM domains
+implementations.  Remove the need_restore member from
+struct pm_domain_data and make the generic PM domains code
+subclass it by adding the need_restore member to the new data type.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit cd0ea672f58d5cfdea271c45cec0c897f2b792aa)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   28 +++++++++++++++++++---------
+ include/linux/pm.h          |    3 +--
+ include/linux/pm_domain.h   |   10 ++++++++++
+ 3 files changed, 30 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index c2468a7..22fe029 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -188,11 +188,12 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
+                                 struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
++      struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+       struct device *dev = pdd->dev;
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+-      if (pdd->need_restore)
++      if (gpd_data->need_restore)
+               return 0;
+       mutex_unlock(&genpd->lock);
+@@ -210,7 +211,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
+       mutex_lock(&genpd->lock);
+       if (!ret)
+-              pdd->need_restore = true;
++              gpd_data->need_restore = true;
+       return ret;
+ }
+@@ -224,10 +225,11 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
+                                     struct generic_pm_domain *genpd)
+       __releases(&genpd->lock) __acquires(&genpd->lock)
+ {
++      struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+       struct device *dev = pdd->dev;
+       struct device_driver *drv = dev->driver;
+-      if (!pdd->need_restore)
++      if (!gpd_data->need_restore)
+               return;
+       mutex_unlock(&genpd->lock);
+@@ -244,7 +246,7 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
+       mutex_lock(&genpd->lock);
+-      pdd->need_restore = false;
++      gpd_data->need_restore = false;
+ }
+ /**
+@@ -493,7 +495,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
+               mutex_lock(&genpd->lock);
+       }
+       finish_wait(&genpd->status_wait_queue, &wait);
+-      __pm_genpd_restore_device(&dev->power.subsys_data->domain_data, genpd);
++      __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
+       genpd->resume_count--;
+       genpd_set_active(genpd);
+       wake_up_all(&genpd->status_wait_queue);
+@@ -1080,6 +1082,7 @@ static void pm_genpd_complete(struct device *dev)
+  */
+ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+ {
++      struct generic_pm_domain_data *gpd_data;
+       struct pm_domain_data *pdd;
+       int ret = 0;
+@@ -1106,14 +1109,20 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+                       goto out;
+               }
++      gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
++      if (!gpd_data) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
+       genpd->device_count++;
+       dev->pm_domain = &genpd->domain;
+       dev_pm_get_subsys_data(dev);
+-      pdd = &dev->power.subsys_data->domain_data;
+-      pdd->dev = dev;
+-      pdd->need_restore = false;
+-      list_add_tail(&pdd->list_node, &genpd->dev_list);
++      dev->power.subsys_data->domain_data = &gpd_data->base;
++      gpd_data->base.dev = dev;
++      gpd_data->need_restore = false;
++      list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+  out:
+       genpd_release_lock(genpd);
+@@ -1152,6 +1161,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+               pdd->dev = NULL;
+               dev_pm_put_subsys_data(dev);
+               dev->pm_domain = NULL;
++              kfree(to_gpd_data(pdd));
+               genpd->device_count--;
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index c0fc859..70b79e7 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -433,7 +433,6 @@ struct wakeup_source;
+ struct pm_domain_data {
+       struct list_head list_node;
+       struct device *dev;
+-      bool need_restore;
+ };
+ struct pm_subsys_data {
+@@ -443,7 +442,7 @@ struct pm_subsys_data {
+       struct list_head clock_list;
+ #endif
+ #ifdef CONFIG_PM_GENERIC_DOMAINS
+-      struct pm_domain_data domain_data;
++      struct pm_domain_data *domain_data;
+ #endif
+ };
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 2538d90..65633e5 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -62,6 +62,16 @@ struct gpd_link {
+       struct list_head slave_node;
+ };
++struct generic_pm_domain_data {
++      struct pm_domain_data base;
++      bool need_restore;
++};
++
++static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
++{
++      return container_of(pdd, struct generic_pm_domain_data, base);
++}
++
+ #ifdef CONFIG_PM_GENERIC_DOMAINS
+ extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                              struct device *dev);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0067-doc-fix-broken-references.patch b/patches.runtime_pm/0067-doc-fix-broken-references.patch
new file mode 100644 (file)
index 0000000..cd1df82
--- /dev/null
@@ -0,0 +1,136 @@
+From 0eaad2d7475d41b9bb7afcecbe99aaafcc8a2761 Mon Sep 17 00:00:00 2001
+From: Paul Bolle <pebolle@tiscali.nl>
+Date: Mon, 15 Aug 2011 02:02:26 +0200
+Subject: doc: fix broken references
+
+There are numerous broken references to Documentation files (in other
+Documentation files, in comments, etc.). These broken references are
+caused by typo's in the references, and by renames or removals of the
+Documentation files. Some broken references are simply odd.
+
+Fix these broken references, sometimes by dropping the irrelevant text
+they were part of.
+
+Signed-off-by: Paul Bolle <pebolle@tiscali.nl>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+(cherry picked from commit 395cf9691d72173d8cdaa613c5f0255f993af94b)
+
+Conflicts:
+
+       Documentation/PCI/pci.txt
+       Documentation/blackfin/bfin-gpio-notes.txt
+       Documentation/block/biodoc.txt
+       Documentation/bus-virt-phys-mapping.txt
+       Documentation/cdrom/packet-writing.txt
+       Documentation/development-process/4.Coding
+       Documentation/devicetree/bindings/gpio/led.txt
+       Documentation/filesystems/caching/object.txt
+       Documentation/filesystems/locks.txt
+       Documentation/filesystems/nfs/idmapper.txt
+       Documentation/filesystems/pohmelfs/design_notes.txt
+       Documentation/filesystems/proc.txt
+       Documentation/filesystems/vfs.txt
+       Documentation/frv/booting.txt
+       Documentation/input/input.txt
+       Documentation/kernel-docs.txt
+       Documentation/kernel-parameters.txt
+       Documentation/laptops/thinkpad-acpi.txt
+       Documentation/media-framework.txt
+       Documentation/memory-barriers.txt
+       Documentation/networking/scaling.txt
+       Documentation/power/userland-swsusp.txt
+       Documentation/rfkill.txt
+       Documentation/scsi/aic7xxx_old.txt
+       Documentation/scsi/scsi_mid_low_api.txt
+       Documentation/security/keys-trusted-encrypted.txt
+       Documentation/sound/oss/PAS16
+       Documentation/spi/pxa2xx
+       Documentation/timers/highres.txt
+       Documentation/usb/dma.txt
+       Documentation/virtual/lguest/lguest.c
+       Documentation/vm/numa
+       Documentation/vm/slub.txt
+       arch/alpha/kernel/srm_env.c
+       arch/arm/Kconfig
+       arch/arm/include/asm/io.h
+       arch/arm/mach-pxa/xcep.c
+       arch/ia64/hp/common/sba_iommu.c
+       arch/m68k/q40/README
+       arch/microblaze/include/asm/dma-mapping.h
+       arch/mips/include/asm/lasat/lasat.h
+       arch/mn10300/Kconfig
+       arch/mn10300/kernel/irq.c
+       arch/openrisc/Kconfig
+       arch/openrisc/include/asm/dma-mapping.h
+       arch/parisc/include/asm/dma-mapping.h
+       arch/parisc/kernel/pci-dma.c
+       arch/powerpc/include/asm/qe.h
+       arch/powerpc/sysdev/qe_lib/qe.c
+       arch/unicore32/include/asm/io.h
+       arch/x86/Kconfig
+       arch/x86/Kconfig.debug
+       arch/x86/boot/header.S
+       arch/x86/include/asm/dma-mapping.h
+       arch/x86/kernel/amd_gart_64.c
+       arch/x86/kernel/apm_32.c
+       arch/x86/kernel/pci-dma.c
+       drivers/char/apm-emulation.c
+       drivers/input/misc/rotary_encoder.c
+       drivers/leds/Kconfig
+       drivers/media/dvb/dvb-usb/af9005-remote.c
+       drivers/media/dvb/dvb-usb/af9005.c
+       drivers/media/dvb/frontends/dib3000.h
+       drivers/media/dvb/frontends/dib3000mb.c
+       drivers/mtd/Kconfig
+       drivers/net/Kconfig
+       drivers/net/can/sja1000/sja1000_of_platform.c
+       drivers/net/tulip/21142.c
+       drivers/net/tulip/eeprom.c
+       drivers/net/tulip/interrupt.c
+       drivers/net/tulip/media.c
+       drivers/net/tulip/pnic.c
+       drivers/net/tulip/pnic2.c
+       drivers/net/tulip/timer.c
+       drivers/net/tulip/tulip.h
+       drivers/net/tulip/tulip_core.c
+       drivers/parisc/sba_iommu.c
+       drivers/platform/x86/Kconfig
+       drivers/scsi/megaraid/megaraid_mbox.c
+       drivers/staging/cxt1e1/Kconfig
+       drivers/usb/serial/digi_acceleport.c
+       drivers/video/igafb.c
+       drivers/watchdog/smsc37b787_wdt.c
+       fs/configfs/inode.c
+       fs/configfs/item.c
+       fs/locks.c
+       fs/squashfs/Kconfig
+       include/linux/io-mapping.h
+       include/linux/isdn.h
+       include/linux/platform_data/ntc_thermistor.h
+       include/media/videobuf-dma-sg.h
+       include/target/configfs_macros.h
+       net/netfilter/Kconfig
+       sound/oss/Kconfig
+       tools/perf/util/config.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/basic-pm-debugging.txt |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
+index ddd7817..05a7fe7 100644
+--- a/Documentation/power/basic-pm-debugging.txt
++++ b/Documentation/power/basic-pm-debugging.txt
+@@ -173,7 +173,7 @@ kernel messages using the serial console.  This may provide you with some
+ information about the reasons of the suspend (resume) failure.  Alternatively,
+ it may be possible to use a FireWire port for debugging with firescope
+ (ftp://ftp.firstfloor.org/pub/ak/firescope/).  On x86 it is also possible to
+-use the PM_TRACE mechanism documented in Documentation/s2ram.txt .
++use the PM_TRACE mechanism documented in Documentation/power/s2ram.txt .
+ 2. Testing suspend to RAM (STR)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0068-PM-Runtime-Don-t-run-callbacks-under-lock-for-power..patch b/patches.runtime_pm/0068-PM-Runtime-Don-t-run-callbacks-under-lock-for-power..patch
new file mode 100644 (file)
index 0000000..7cb9e85
--- /dev/null
@@ -0,0 +1,146 @@
+From ef6791f347cd72ae5354a827004f57bdb4b6bbd7 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 27 Sep 2011 21:54:52 +0200
+Subject: PM / Runtime: Don't run callbacks under lock for power.irq_safe set
+
+The rpm_suspend() and rpm_resume() routines execute subsystem or PM
+domain callbacks under power.lock if power.irq_safe is set for the
+given device.  This is inconsistent with that rpm_idle() does after
+commit 02b2677 (PM / Runtime: Allow _put_sync() from
+interrupts-disabled context) and is problematic for subsystems and PM
+domains wanting to use power.lock for synchronization in their
+runtime PM callbacks.
+
+This change requires the code checking if the device's runtime PM
+status is RPM_SUSPENDING or RPM_RESUMING to be modified too, to take
+the power.irq_safe set case into account (that code wasn't reachable
+before with power.irq_safe set, because it's executed with the
+device's power.lock held).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Ming Lei <tom.leiming@gmail.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit ad3c36a534bc7b945d7bffdda1c62e13bf93489a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |   68 ++++++++++++++++++++++++++++--------------
+ 1 file changed, 46 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 04e18ab..aecb2a8 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -155,6 +155,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
+ }
+ /**
++ * __rpm_callback - Run a given runtime PM callback for a given device.
++ * @cb: Runtime PM callback to run.
++ * @dev: Device to run the callback for.
++ */
++static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
++      __releases(&dev->power.lock) __acquires(&dev->power.lock)
++{
++      int retval;
++
++      if (dev->power.irq_safe)
++              spin_unlock(&dev->power.lock);
++      else
++              spin_unlock_irq(&dev->power.lock);
++
++      retval = cb(dev);
++
++      if (dev->power.irq_safe)
++              spin_lock(&dev->power.lock);
++      else
++              spin_lock_irq(&dev->power.lock);
++
++      return retval;
++}
++
++/**
+  * rpm_idle - Notify device bus type if the device can be suspended.
+  * @dev: Device to notify the bus type about.
+  * @rpmflags: Flag bits.
+@@ -225,19 +250,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
+       else
+               callback = NULL;
+-      if (callback) {
+-              if (dev->power.irq_safe)
+-                      spin_unlock(&dev->power.lock);
+-              else
+-                      spin_unlock_irq(&dev->power.lock);
+-
+-              callback(dev);
+-
+-              if (dev->power.irq_safe)
+-                      spin_lock(&dev->power.lock);
+-              else
+-                      spin_lock_irq(&dev->power.lock);
+-      }
++      if (callback)
++              __rpm_callback(callback, dev);
+       dev->power.idle_notification = false;
+       wake_up_all(&dev->power.wait_queue);
+@@ -252,22 +266,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
+  * @dev: Device to run the callback for.
+  */
+ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+-      __releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+       int retval;
+       if (!cb)
+               return -ENOSYS;
+-      if (dev->power.irq_safe) {
+-              retval = cb(dev);
+-      } else {
+-              spin_unlock_irq(&dev->power.lock);
+-
+-              retval = cb(dev);
++      retval = __rpm_callback(cb, dev);
+-              spin_lock_irq(&dev->power.lock);
+-      }
+       dev->power.runtime_error = retval;
+       return retval != -EACCES ? retval : -EIO;
+ }
+@@ -347,6 +353,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+                       goto out;
+               }
++              if (dev->power.irq_safe) {
++                      spin_unlock(&dev->power.lock);
++
++                      cpu_relax();
++
++                      spin_lock(&dev->power.lock);
++                      goto repeat;
++              }
++
+               /* Wait for the other suspend running in parallel with us. */
+               for (;;) {
+                       prepare_to_wait(&dev->power.wait_queue, &wait,
+@@ -496,6 +511,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
+                       goto out;
+               }
++              if (dev->power.irq_safe) {
++                      spin_unlock(&dev->power.lock);
++
++                      cpu_relax();
++
++                      spin_lock(&dev->power.lock);
++                      goto repeat;
++              }
++
+               /* Wait for the operation carried out in parallel with us. */
+               for (;;) {
+                       prepare_to_wait(&dev->power.wait_queue, &wait,
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0069-PM-Runtime-Introduce-trace-points-for-tracing-rpm_-f.patch b/patches.runtime_pm/0069-PM-Runtime-Introduce-trace-points-for-tracing-rpm_-f.patch
new file mode 100644 (file)
index 0000000..d561ad8
--- /dev/null
@@ -0,0 +1,169 @@
+From ac521a3ac1cd25c44c786a5ef4099b389eb2fc07 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Tue, 27 Sep 2011 22:53:27 +0200
+Subject: PM / Runtime: Introduce trace points for tracing rpm_* functions
+
+This patch introduces 3 trace points to prepare for tracing
+rpm_idle/rpm_suspend/rpm_resume functions, so we can use these
+trace points to replace the current dev_dbg().
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 53b615ccca567ada1931eb04ad0614ac150c14a3)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/trace/events/rpm.h |   99 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/trace/Makefile      |    1 +
+ kernel/trace/rpm-traces.c  |   20 +++++++++
+ 3 files changed, 120 insertions(+)
+ create mode 100644 include/trace/events/rpm.h
+ create mode 100644 kernel/trace/rpm-traces.c
+
+diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
+new file mode 100644
+index 0000000..d62c558
+--- /dev/null
++++ b/include/trace/events/rpm.h
+@@ -0,0 +1,99 @@
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM rpm
++
++#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RUNTIME_POWER_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++#include <linux/device.h>
++
++/*
++ * The rpm_internal events are used for tracing some important
++ * runtime pm internal functions.
++ */
++DECLARE_EVENT_CLASS(rpm_internal,
++
++      TP_PROTO(struct device *dev, int flags),
++
++      TP_ARGS(dev, flags),
++
++      TP_STRUCT__entry(
++              __string(       name,           dev_name(dev)   )
++              __field(        int,            flags           )
++              __field(        int ,           usage_count     )
++              __field(        int ,           disable_depth   )
++              __field(        int ,           runtime_auto    )
++              __field(        int ,           request_pending )
++              __field(        int ,           irq_safe        )
++              __field(        int ,           child_count     )
++      ),
++
++      TP_fast_assign(
++              __assign_str(name, dev_name(dev));
++              __entry->flags = flags;
++              __entry->usage_count = atomic_read(
++                      &dev->power.usage_count);
++              __entry->disable_depth = dev->power.disable_depth;
++              __entry->runtime_auto = dev->power.runtime_auto;
++              __entry->request_pending = dev->power.request_pending;
++              __entry->irq_safe = dev->power.irq_safe;
++              __entry->child_count = atomic_read(
++                      &dev->power.child_count);
++      ),
++
++      TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
++                      " irq-%-1d child-%d",
++                      __get_str(name), __entry->flags,
++                      __entry->usage_count,
++                      __entry->disable_depth,
++                      __entry->runtime_auto,
++                      __entry->request_pending,
++                      __entry->irq_safe,
++                      __entry->child_count
++               )
++);
++DEFINE_EVENT(rpm_internal, rpm_suspend,
++
++      TP_PROTO(struct device *dev, int flags),
++
++      TP_ARGS(dev, flags)
++);
++DEFINE_EVENT(rpm_internal, rpm_resume,
++
++      TP_PROTO(struct device *dev, int flags),
++
++      TP_ARGS(dev, flags)
++);
++DEFINE_EVENT(rpm_internal, rpm_idle,
++
++      TP_PROTO(struct device *dev, int flags),
++
++      TP_ARGS(dev, flags)
++);
++
++TRACE_EVENT(rpm_return_int,
++      TP_PROTO(struct device *dev, unsigned long ip, int ret),
++      TP_ARGS(dev, ip, ret),
++
++      TP_STRUCT__entry(
++              __string(       name,           dev_name(dev))
++              __field(        unsigned long,          ip      )
++              __field(        int,                    ret     )
++      ),
++
++      TP_fast_assign(
++              __assign_str(name, dev_name(dev));
++              __entry->ip = ip;
++              __entry->ret = ret;
++      ),
++
++      TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
++              __entry->ret)
++);
++
++#endif /* _TRACE_RUNTIME_POWER_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index 761c510..56bdab5 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -53,6 +53,7 @@ endif
+ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+ obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+ obj-$(CONFIG_TRACEPOINTS) += power-traces.o
++obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
+ ifeq ($(CONFIG_TRACING),y)
+ obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
+ endif
+diff --git a/kernel/trace/rpm-traces.c b/kernel/trace/rpm-traces.c
+new file mode 100644
+index 0000000..4b3b5ea
+--- /dev/null
++++ b/kernel/trace/rpm-traces.c
+@@ -0,0 +1,20 @@
++/*
++ * Power trace points
++ *
++ * Copyright (C) 2009 Ming Lei <ming.lei@canonical.com>
++ */
++
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/usb.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/rpm.h>
++
++EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_return_int);
++EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_idle);
++EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_suspend);
++EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_resume);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0070-PM-Tracing-build-rpm-traces.c-only-if-CONFIG_PM_RUNT.patch b/patches.runtime_pm/0070-PM-Tracing-build-rpm-traces.c-only-if-CONFIG_PM_RUNT.patch
new file mode 100644 (file)
index 0000000..7ca34c1
--- /dev/null
@@ -0,0 +1,36 @@
+From 959fd5b540ba24def2cdd4fcf044c9c816bf64d8 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Thu, 29 Sep 2011 22:07:23 +0200
+Subject: PM / Tracing: build rpm-traces.c only if CONFIG_PM_RUNTIME is set
+
+Do not build kernel/trace/rpm-traces.c if CONFIG_PM_RUNTIME is not
+set, which avoids a build failure.
+
+[rjw: Added the changelog and modified the subject slightly.]
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 2a5306cc5f383b0e7414c75e458111afd4a563a4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/trace/Makefile |    2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index 56bdab5..f49405f 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -53,7 +53,9 @@ endif
+ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+ obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+ obj-$(CONFIG_TRACEPOINTS) += power-traces.o
++ifeq ($(CONFIG_PM_RUNTIME),y)
+ obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
++endif
+ ifeq ($(CONFIG_TRACING),y)
+ obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
+ endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0071-PM-Runtime-Replace-dev_dbg-with-trace_rpm_.patch b/patches.runtime_pm/0071-PM-Runtime-Replace-dev_dbg-with-trace_rpm_.patch
new file mode 100644 (file)
index 0000000..352b2d8
--- /dev/null
@@ -0,0 +1,105 @@
+From bd308f6cc9e3a2a842de0c1e953dcb5e4a5c352a Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Tue, 27 Sep 2011 22:54:41 +0200
+Subject: PM / Runtime: Replace dev_dbg() with trace_rpm_*()
+
+This patch replaces dev_dbg with trace_rpm_* inside
+the three important functions:
+
+       rpm_idle
+       rpm_suspend
+       rpm_resume
+
+Trace points have the below advantages compared with dev_dbg:
+
+       - trace points include much runtime information(such as
+       running cpu, current task, ...)
+
+       - most of linux distributions may disable "verbose debug"
+       driver debug compile switch, so it is very difficult to
+       report/debug runtime pm related problems from distribution
+       users without this kind of debug information.
+
+       - for upstream kernel users, enableing the debug switch will
+       produce many useless "rpm_resume" output, and it is very noise.
+
+       - dev_dbg inside rpm_suspend/rpm_resume may have some effects
+       on runtime pm behaviour of console devicer
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c3dc2f14622a06488f11452b6efd1e02c5a8548b)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index aecb2a8..7a6fb5e 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched.h>
+ #include <linux/pm_runtime.h>
++#include <trace/events/rpm.h>
+ #include "power.h"
+ static int rpm_resume(struct device *dev, int rpmflags);
+@@ -196,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
+       int (*callback)(struct device *);
+       int retval;
++      trace_rpm_idle(dev, rpmflags);
+       retval = rpm_check_suspend_allowed(dev);
+       if (retval < 0)
+               ;       /* Conditions are wrong. */
+@@ -257,6 +259,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
+       wake_up_all(&dev->power.wait_queue);
+  out:
++      trace_rpm_return_int(dev, _THIS_IP_, retval);
+       return retval;
+ }
+@@ -301,7 +304,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       struct device *parent = NULL;
+       int retval;
+-      dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
++      trace_rpm_suspend(dev, rpmflags);
+  repeat:
+       retval = rpm_check_suspend_allowed(dev);
+@@ -445,7 +448,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       }
+  out:
+-      dev_dbg(dev, "%s returns %d\n", __func__, retval);
++      trace_rpm_return_int(dev, _THIS_IP_, retval);
+       return retval;
+ }
+@@ -474,7 +477,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+       struct device *parent = NULL;
+       int retval = 0;
+-      dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
++      trace_rpm_resume(dev, rpmflags);
+  repeat:
+       if (dev->power.runtime_error)
+@@ -639,7 +642,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+               spin_lock_irq(&dev->power.lock);
+       }
+-      dev_dbg(dev, "%s returns %d\n", __func__, retval);
++      trace_rpm_return_int(dev, _THIS_IP_, retval);
+       return retval;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0072-PM-OPP-Add-OPP-availability-change-notifier.patch b/patches.runtime_pm/0072-PM-OPP-Add-OPP-availability-change-notifier.patch
new file mode 100644 (file)
index 0000000..3d0432e
--- /dev/null
@@ -0,0 +1,143 @@
+From f467404a23d35af4631fd0472c6e7d7416210bfe Mon Sep 17 00:00:00 2001
+From: MyungJoo Ham <myungjoo.ham@samsung.com>
+Date: Fri, 30 Sep 2011 22:35:12 +0200
+Subject: PM / OPP: Add OPP availability change notifier.
+
+The patch enables to register notifier_block for an OPP-device in order
+to get notified for any changes in the availability of OPPs of the
+device. For example, if a new OPP is inserted or enable/disable status
+of an OPP is changed, the notifier is executed.
+
+This enables the usage of opp_add, opp_enable, and opp_disable to
+directly take effect with any connected entities such as cpufreq or
+devfreq.
+
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Reviewed-by: Mike Turquette <mturquette@ti.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 03ca370fbf7b76d6d002380dbdc2cdc2319f9c80)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/opp.c |   30 ++++++++++++++++++++++++++++++
+ include/linux/opp.h      |   12 ++++++++++++
+ 2 files changed, 42 insertions(+)
+
+diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
+index b23de18..434a6c0 100644
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -73,6 +73,7 @@ struct opp {
+  *            RCU usage: nodes are not modified in the list of device_opp,
+  *            however addition is possible and is secured by dev_opp_list_lock
+  * @dev:      device pointer
++ * @head:     notifier head to notify the OPP availability changes.
+  * @opp_list: list of opps
+  *
+  * This is an internal data structure maintaining the link to opps attached to
+@@ -83,6 +84,7 @@ struct device_opp {
+       struct list_head node;
+       struct device *dev;
++      struct srcu_notifier_head head;
+       struct list_head opp_list;
+ };
+@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+               }
+               dev_opp->dev = dev;
++              srcu_init_notifier_head(&dev_opp->head);
+               INIT_LIST_HEAD(&dev_opp->opp_list);
+               /* Secure the device list modification */
+@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+       list_add_rcu(&new_opp->node, head);
+       mutex_unlock(&dev_opp_list_lock);
++      /*
++       * Notify the changes in the availability of the operable
++       * frequency/voltage list.
++       */
++      srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
+       return 0;
+ }
+@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
+       mutex_unlock(&dev_opp_list_lock);
+       synchronize_rcu();
++      /* Notify the change of the OPP availability */
++      if (availability_req)
++              srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
++                                       new_opp);
++      else
++              srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
++                                       new_opp);
++
+       /* clean up old opp */
+       new_opp = opp;
+       goto out;
+@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev,
+       *table = NULL;
+ }
+ #endif                /* CONFIG_CPU_FREQ */
++
++/**
++ * opp_get_notifier() - find notifier_head of the device with opp
++ * @dev:      device pointer used to lookup device OPPs.
++ */
++struct srcu_notifier_head *opp_get_notifier(struct device *dev)
++{
++      struct device_opp *dev_opp = find_device_opp(dev);
++
++      if (IS_ERR(dev_opp))
++              return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
++
++      return &dev_opp->head;
++}
+diff --git a/include/linux/opp.h b/include/linux/opp.h
+index 7020e97..87a9208 100644
+--- a/include/linux/opp.h
++++ b/include/linux/opp.h
+@@ -16,9 +16,14 @@
+ #include <linux/err.h>
+ #include <linux/cpufreq.h>
++#include <linux/notifier.h>
+ struct opp;
++enum opp_event {
++      OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
++};
++
+ #if defined(CONFIG_PM_OPP)
+ unsigned long opp_get_voltage(struct opp *opp);
+@@ -40,6 +45,8 @@ int opp_enable(struct device *dev, unsigned long freq);
+ int opp_disable(struct device *dev, unsigned long freq);
++struct srcu_notifier_head *opp_get_notifier(struct device *dev);
++
+ #else
+ static inline unsigned long opp_get_voltage(struct opp *opp)
+ {
+@@ -89,6 +96,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
+ {
+       return 0;
+ }
++
++struct srcu_notifier_head *opp_get_notifier(struct device *dev)
++{
++      return ERR_PTR(-EINVAL);
++}
+ #endif                /* CONFIG_PM */
+ #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0073-PM-OPP-Fix-build-when-CONFIG_PM_OPP-is-not-set.patch b/patches.runtime_pm/0073-PM-OPP-Fix-build-when-CONFIG_PM_OPP-is-not-set.patch
new file mode 100644 (file)
index 0000000..787b3f1
--- /dev/null
@@ -0,0 +1,45 @@
+From c744edfd846b87b4d6e9862546553ff9c2094674 Mon Sep 17 00:00:00 2001
+From: Tony Lindgren <tony@atomide.com>
+Date: Thu, 3 Nov 2011 10:12:27 +0100
+Subject: PM / OPP: Fix build when CONFIG_PM_OPP is not set
+
+Commit 03ca370fbf7b76d6d002380dbdc2cdc2319f9c80 (PM / OPP: Add
+OPP availability change notifier) does not compile if CONFIG_PM_OPP
+is not set:
+
+arch/arm/plat-omap/omap-pm-noop.o: In function `opp_get_notifier':
+include/linux/opp.h:103: multiple definition of `opp_get_notifier'
+include/linux/opp.h:103: first defined here
+
+Also fix incorrect comment.
+
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit a96d69d1b02c4a526bd8c07e0cb10c129025c88c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/opp.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/opp.h b/include/linux/opp.h
+index 87a9208..ee94b33 100644
+--- a/include/linux/opp.h
++++ b/include/linux/opp.h
+@@ -97,11 +97,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
+       return 0;
+ }
+-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
++static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+ {
+       return ERR_PTR(-EINVAL);
+ }
+-#endif                /* CONFIG_PM */
++#endif                /* CONFIG_PM_OPP */
+ #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+ int opp_init_cpufreq_table(struct device *dev,
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0074-PM-QoS-Add-function-dev_pm_qos_read_value-v3.patch b/patches.runtime_pm/0074-PM-QoS-Add-function-dev_pm_qos_read_value-v3.patch
new file mode 100644 (file)
index 0000000..a22e7eb
--- /dev/null
@@ -0,0 +1,463 @@
+From c64acaafb72c31d2afed02554cca37317f9599c2 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 29 Sep 2011 22:29:44 +0200
+Subject: PM / QoS: Add function dev_pm_qos_read_value() (v3)
+
+To read the current PM QoS value for a given device we need to
+make sure that the device's power.constraints object won't be
+removed while we're doing that.  For this reason, put the
+operation under dev->power.lock and acquire the lock
+around the initialization and removal of power.constraints.
+
+Moreover, since we're using the value of power.constraints to
+determine whether or not the object is present, the
+power.constraints_state field isn't necessary any more and may be
+removed.  However, dev_pm_qos_add_request() needs to check if the
+device is being removed from the system before allocating a new
+PM QoS constraints object for it, so make it use the
+power.power_state field of struct device for this purpose.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 1a9a91525d806f2b3bd8b57b963755a96fd36ce2)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/main.c  |    6 +-
+ drivers/base/power/power.h |   10 ++-
+ drivers/base/power/qos.c   |  160 +++++++++++++++++++++++++-------------------
+ include/linux/pm.h         |   10 +--
+ include/linux/pm_qos.h     |   12 +++-
+ 5 files changed, 114 insertions(+), 84 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 956443f..c6291ab 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -22,7 +22,6 @@
+ #include <linux/mutex.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+-#include <linux/pm_qos.h>
+ #include <linux/resume-trace.h>
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+@@ -66,6 +65,7 @@ void device_pm_init(struct device *dev)
+       spin_lock_init(&dev->power.lock);
+       pm_runtime_init(dev);
+       INIT_LIST_HEAD(&dev->power.entry);
++      dev->power.power_state = PMSG_INVALID;
+ }
+ /**
+@@ -97,8 +97,8 @@ void device_pm_add(struct device *dev)
+               dev_warn(dev, "parent %s should not be sleeping\n",
+                       dev_name(dev->parent));
+       list_add_tail(&dev->power.entry, &dpm_list);
+-      mutex_unlock(&dpm_list_mtx);
+       dev_pm_qos_constraints_init(dev);
++      mutex_unlock(&dpm_list_mtx);
+ }
+ /**
+@@ -109,9 +109,9 @@ void device_pm_remove(struct device *dev)
+ {
+       pr_debug("PM: Removing info for %s:%s\n",
+                dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+-      dev_pm_qos_constraints_destroy(dev);
+       complete_all(&dev->power.completion);
+       mutex_lock(&dpm_list_mtx);
++      dev_pm_qos_constraints_destroy(dev);
+       list_del_init(&dev->power.entry);
+       mutex_unlock(&dpm_list_mtx);
+       device_wakeup_disable(dev);
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index f2a25f1..9bf6232 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -1,3 +1,5 @@
++#include <linux/pm_qos.h>
++
+ #ifdef CONFIG_PM_RUNTIME
+ extern void pm_runtime_init(struct device *dev);
+@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *);
+ static inline void device_pm_init(struct device *dev)
+ {
+       spin_lock_init(&dev->power.lock);
++      dev->power.power_state = PMSG_INVALID;
+       pm_runtime_init(dev);
+ }
++static inline void device_pm_add(struct device *dev)
++{
++      dev_pm_qos_constraints_init(dev);
++}
++
+ static inline void device_pm_remove(struct device *dev)
+ {
++      dev_pm_qos_constraints_destroy(dev);
+       pm_runtime_remove(dev);
+ }
+-static inline void device_pm_add(struct device *dev) {}
+ static inline void device_pm_move_before(struct device *deva,
+                                        struct device *devb) {}
+ static inline void device_pm_move_after(struct device *deva,
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index 8d0b811..91e0614 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -30,15 +30,6 @@
+  * . To minimize the data usage by the per-device constraints, the data struct
+  *   is only allocated at the first call to dev_pm_qos_add_request.
+  * . The data is later free'd when the device is removed from the system.
+- * . The constraints_state variable from dev_pm_info tracks the data struct
+- *    allocation state:
+- *    DEV_PM_QOS_NO_DEVICE: No device present or device removed, no data
+- *     allocated,
+- *    DEV_PM_QOS_DEVICE_PRESENT: Device present, data not allocated and will be
+- *     allocated at the first call to dev_pm_qos_add_request,
+- *    DEV_PM_QOS_ALLOCATED: Device present, data allocated. The per-device
+- *     PM QoS constraints framework is operational and constraints can be
+- *     added, updated or removed using the dev_pm_qos_* API.
+  *  . A global mutex protects the constraints users from the data being
+  *     allocated and free'd.
+  */
+@@ -51,8 +42,30 @@
+ static DEFINE_MUTEX(dev_pm_qos_mtx);
++
+ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
++/**
++ * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
++ * @dev: Device to get the PM QoS constraint value for.
++ */
++s32 dev_pm_qos_read_value(struct device *dev)
++{
++      struct pm_qos_constraints *c;
++      unsigned long flags;
++      s32 ret = 0;
++
++      spin_lock_irqsave(&dev->power.lock, flags);
++
++      c = dev->power.constraints;
++      if (c)
++              ret = pm_qos_read_value(c);
++
++      spin_unlock_irqrestore(&dev->power.lock, flags);
++
++      return ret;
++}
++
+ /*
+  * apply_constraint
+  * @req: constraint request to apply
+@@ -105,27 +118,31 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
+       }
+       BLOCKING_INIT_NOTIFIER_HEAD(n);
++      plist_head_init(&c->list);
++      c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
++      c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
++      c->type = PM_QOS_MIN;
++      c->notifiers = n;
++
++      spin_lock_irq(&dev->power.lock);
+       dev->power.constraints = c;
+-      plist_head_init(&dev->power.constraints->list);
+-      dev->power.constraints->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+-      dev->power.constraints->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+-      dev->power.constraints->type = PM_QOS_MIN;
+-      dev->power.constraints->notifiers = n;
+-      dev->power.constraints_state = DEV_PM_QOS_ALLOCATED;
++      spin_unlock_irq(&dev->power.lock);
+       return 0;
+ }
+ /**
+- * dev_pm_qos_constraints_init
++ * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
+  * @dev: target device
+  *
+- * Called from the device PM subsystem at device insertion
++ * Called from the device PM subsystem during device insertion under
++ * device_pm_lock().
+  */
+ void dev_pm_qos_constraints_init(struct device *dev)
+ {
+       mutex_lock(&dev_pm_qos_mtx);
+-      dev->power.constraints_state = DEV_PM_QOS_DEVICE_PRESENT;
++      dev->power.constraints = NULL;
++      dev->power.power_state = PMSG_ON;
+       mutex_unlock(&dev_pm_qos_mtx);
+ }
+@@ -133,34 +150,38 @@ void dev_pm_qos_constraints_init(struct device *dev)
+  * dev_pm_qos_constraints_destroy
+  * @dev: target device
+  *
+- * Called from the device PM subsystem at device removal
++ * Called from the device PM subsystem on device removal under device_pm_lock().
+  */
+ void dev_pm_qos_constraints_destroy(struct device *dev)
+ {
+       struct dev_pm_qos_request *req, *tmp;
++      struct pm_qos_constraints *c;
+       mutex_lock(&dev_pm_qos_mtx);
+-      if (dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
+-              /* Flush the constraints list for the device */
+-              plist_for_each_entry_safe(req, tmp,
+-                                        &dev->power.constraints->list,
+-                                        node) {
+-                      /*
+-                       * Update constraints list and call the notification
+-                       * callbacks if needed
+-                       */
+-                      apply_constraint(req, PM_QOS_REMOVE_REQ,
+-                                       PM_QOS_DEFAULT_VALUE);
+-                      memset(req, 0, sizeof(*req));
+-              }
++      dev->power.power_state = PMSG_INVALID;
++      c = dev->power.constraints;
++      if (!c)
++              goto out;
+-              kfree(dev->power.constraints->notifiers);
+-              kfree(dev->power.constraints);
+-              dev->power.constraints = NULL;
++      /* Flush the constraints list for the device */
++      plist_for_each_entry_safe(req, tmp, &c->list, node) {
++              /*
++               * Update constraints list and call the notification
++               * callbacks if needed
++               */
++              apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
++              memset(req, 0, sizeof(*req));
+       }
+-      dev->power.constraints_state = DEV_PM_QOS_NO_DEVICE;
++      spin_lock_irq(&dev->power.lock);
++      dev->power.constraints = NULL;
++      spin_unlock_irq(&dev->power.lock);
++
++      kfree(c->notifiers);
++      kfree(c);
++
++ out:
+       mutex_unlock(&dev_pm_qos_mtx);
+ }
+@@ -178,8 +199,9 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
+  *
+  * Returns 1 if the aggregated constraint value has changed,
+  * 0 if the aggregated constraint value has not changed,
+- * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+- * removed from the system
++ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
++ * to allocate for data structures, -ENODEV if the device has just been removed
++ * from the system.
+  */
+ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+                          s32 value)
+@@ -195,28 +217,32 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+               return -EINVAL;
+       }
+-      mutex_lock(&dev_pm_qos_mtx);
+       req->dev = dev;
+-      /* Return if the device has been removed */
+-      if (req->dev->power.constraints_state == DEV_PM_QOS_NO_DEVICE) {
+-              ret = -ENODEV;
+-              goto out;
+-      }
++      mutex_lock(&dev_pm_qos_mtx);
+-      /*
+-       * Allocate the constraints data on the first call to add_request,
+-       * i.e. only if the data is not already allocated and if the device has
+-       * not been removed
+-       */
+-      if (dev->power.constraints_state == DEV_PM_QOS_DEVICE_PRESENT)
+-              ret = dev_pm_qos_constraints_allocate(dev);
++      if (!dev->power.constraints) {
++              if (dev->power.power_state.event == PM_EVENT_INVALID) {
++                      /* The device has been removed from the system. */
++                      req->dev = NULL;
++                      ret = -ENODEV;
++                      goto out;
++              } else {
++                      /*
++                       * Allocate the constraints data on the first call to
++                       * add_request, i.e. only if the data is not already
++                       * allocated and if the device has not been removed.
++                       */
++                      ret = dev_pm_qos_constraints_allocate(dev);
++              }
++      }
+       if (!ret)
+               ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+-out:
++ out:
+       mutex_unlock(&dev_pm_qos_mtx);
++
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+@@ -252,7 +278,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+       mutex_lock(&dev_pm_qos_mtx);
+-      if (req->dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
++      if (req->dev->power.constraints) {
+               if (new_value != req->node.prio)
+                       ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
+                                              new_value);
+@@ -293,7 +319,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+       mutex_lock(&dev_pm_qos_mtx);
+-      if (req->dev->power.constraints_state == DEV_PM_QOS_ALLOCATED) {
++      if (req->dev->power.constraints) {
+               ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+                                      PM_QOS_DEFAULT_VALUE);
+               memset(req, 0, sizeof(*req));
+@@ -323,15 +349,12 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+       mutex_lock(&dev_pm_qos_mtx);
+-      /* Silently return if the device has been removed */
+-      if (dev->power.constraints_state != DEV_PM_QOS_ALLOCATED)
+-              goto out;
+-
+-      retval = blocking_notifier_chain_register(
+-                      dev->power.constraints->notifiers,
+-                      notifier);
++      /* Silently return if the constraints object is not present. */
++      if (dev->power.constraints)
++              retval = blocking_notifier_chain_register(
++                              dev->power.constraints->notifiers,
++                              notifier);
+-out:
+       mutex_unlock(&dev_pm_qos_mtx);
+       return retval;
+ }
+@@ -354,15 +377,12 @@ int dev_pm_qos_remove_notifier(struct device *dev,
+       mutex_lock(&dev_pm_qos_mtx);
+-      /* Silently return if the device has been removed */
+-      if (dev->power.constraints_state != DEV_PM_QOS_ALLOCATED)
+-              goto out;
+-
+-      retval = blocking_notifier_chain_unregister(
+-                      dev->power.constraints->notifiers,
+-                      notifier);
++      /* Silently return if the constraints object is not present. */
++      if (dev->power.constraints)
++              retval = blocking_notifier_chain_unregister(
++                              dev->power.constraints->notifiers,
++                              notifier);
+-out:
+       mutex_unlock(&dev_pm_qos_mtx);
+       return retval;
+ }
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 70b79e7..91f248b 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -326,6 +326,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
+  *                    requested by a driver.
+  */
++#define PM_EVENT_INVALID      (-1)
+ #define PM_EVENT_ON           0x0000
+ #define PM_EVENT_FREEZE       0x0001
+ #define PM_EVENT_SUSPEND      0x0002
+@@ -346,6 +347,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
+ #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
+ #define PM_EVENT_AUTO_RESUME  (PM_EVENT_AUTO | PM_EVENT_RESUME)
++#define PMSG_INVALID  ((struct pm_message){ .event = PM_EVENT_INVALID, })
+ #define PMSG_ON               ((struct pm_message){ .event = PM_EVENT_ON, })
+ #define PMSG_FREEZE   ((struct pm_message){ .event = PM_EVENT_FREEZE, })
+ #define PMSG_QUIESCE  ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
+@@ -421,13 +423,6 @@ enum rpm_request {
+       RPM_REQ_RESUME,
+ };
+-/* Per-device PM QoS constraints data struct state */
+-enum dev_pm_qos_state {
+-      DEV_PM_QOS_NO_DEVICE,           /* No device present */
+-      DEV_PM_QOS_DEVICE_PRESENT,      /* Device present, data not allocated */
+-      DEV_PM_QOS_ALLOCATED,           /* Device present, data allocated */
+-};
+-
+ struct wakeup_source;
+ struct pm_domain_data {
+@@ -489,7 +484,6 @@ struct dev_pm_info {
+ #endif
+       struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
+       struct pm_qos_constraints *constraints;
+-      enum dev_pm_qos_state   constraints_state;
+ };
+ extern void update_pm_runtime_accounting(struct device *dev);
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index ca7bd3f..83b0ea3 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -7,6 +7,7 @@
+ #include <linux/plist.h>
+ #include <linux/notifier.h>
+ #include <linux/miscdevice.h>
++#include <linux/device.h>
+ #define PM_QOS_RESERVED 0
+ #define PM_QOS_CPU_DMA_LATENCY 1
+@@ -77,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request *req);
+ s32 pm_qos_read_value(struct pm_qos_constraints *c);
++s32 dev_pm_qos_read_value(struct device *dev);
+ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+                          s32 value);
+ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
+@@ -117,6 +119,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
+ static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+                       { return 0; }
++static inline s32 dev_pm_qos_read_value(struct device *dev)
++                      { return 0; }
+ static inline int dev_pm_qos_add_request(struct device *dev,
+                                        struct dev_pm_qos_request *req,
+                                        s32 value)
+@@ -139,9 +143,13 @@ static inline int dev_pm_qos_remove_global_notifier(
+                                       struct notifier_block *notifier)
+                       { return 0; }
+ static inline void dev_pm_qos_constraints_init(struct device *dev)
+-                      { return; }
++{
++      dev->power.power_state = PMSG_ON;
++}
+ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+-                      { return; }
++{
++      dev->power.power_state = PMSG_INVALID;
++}
+ #endif
+ #endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0075-PM-QoS-Update-Documentation-for-the-pm_qos-and-dev_p.patch b/patches.runtime_pm/0075-PM-QoS-Update-Documentation-for-the-pm_qos-and-dev_p.patch
new file mode 100644 (file)
index 0000000..2356da2
--- /dev/null
@@ -0,0 +1,155 @@
+From 1b43e3f5a9129a6a94f426c70feb7af110710cd3 Mon Sep 17 00:00:00 2001
+From: Jean Pihet <j-pihet@ti.com>
+Date: Tue, 4 Oct 2011 21:54:45 +0200
+Subject: PM / QoS: Update Documentation for the pm_qos and dev_pm_qos
+ frameworks
+
+Update the documentation for the recently updated pm_qos API, kernel
+and user space.  Add documentation for the per-device PM QoS
+(dev_pm_qos) framework API.
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e3cba3243eb853a052613c804dea033bc4c9cf2d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/pm_qos_interface.txt |   92 ++++++++++++++++++++++++++++--
+ 1 file changed, 87 insertions(+), 5 deletions(-)
+
+diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
+index bfed898..17e130a 100644
+--- a/Documentation/power/pm_qos_interface.txt
++++ b/Documentation/power/pm_qos_interface.txt
+@@ -4,14 +4,19 @@ This interface provides a kernel and user mode interface for registering
+ performance expectations by drivers, subsystems and user space applications on
+ one of the parameters.
+-Currently we have {cpu_dma_latency, network_latency, network_throughput} as the
+-initial set of pm_qos parameters.
++Two different PM QoS frameworks are available:
++1. PM QoS classes for cpu_dma_latency, network_latency, network_throughput.
++2. the per-device PM QoS framework provides the API to manage the per-device latency
++constraints.
+ Each parameters have defined units:
+  * latency: usec
+  * timeout: usec
+  * throughput: kbs (kilo bit / sec)
++
++1. PM QoS framework
++
+ The infrastructure exposes multiple misc device nodes one per implemented
+ parameter.  The set of parameters implement is defined by pm_qos_power_init()
+ and pm_qos_params.h.  This is done because having the available parameters
+@@ -23,14 +28,18 @@ an aggregated target value.  The aggregated target value is updated with
+ changes to the request list or elements of the list.  Typically the
+ aggregated target value is simply the max or min of the request values held
+ in the parameter list elements.
++Note: the aggregated target value is implemented as an atomic variable so that
++reading the aggregated value does not require any locking mechanism.
++
+ From kernel mode the use of this interface is simple:
+-handle = pm_qos_add_request(param_class, target_value):
+-Will insert an element into the list for that identified PM_QOS class with the
++void pm_qos_add_request(handle, param_class, target_value):
++Will insert an element into the list for that identified PM QoS class with the
+ target value.  Upon change to this list the new target is recomputed and any
+ registered notifiers are called only if the target value is now different.
+-Clients of pm_qos need to save the returned handle.
++Clients of pm_qos need to save the returned handle for future use in other
++pm_qos API functions.
+ void pm_qos_update_request(handle, new_target_value):
+ Will update the list element pointed to by the handle with the new target value
+@@ -42,6 +51,20 @@ Will remove the element.  After removal it will update the aggregate target and
+ call the notification tree if the target was changed as a result of removing
+ the request.
++int pm_qos_request(param_class):
++Returns the aggregated value for a given PM QoS class.
++
++int pm_qos_request_active(handle):
++Returns if the request is still active, i.e. it has not been removed from a
++PM QoS class constraints list.
++
++int pm_qos_add_notifier(param_class, notifier):
++Adds a notification callback function to the PM QoS class. The callback is
++called when the aggregated value for the PM QoS class is changed.
++
++int pm_qos_remove_notifier(int param_class, notifier):
++Removes the notification callback function for the PM QoS class.
++
+ From user mode:
+ Only processes can register a pm_qos request.  To provide for automatic
+@@ -63,4 +86,63 @@ To remove the user mode request for a target value simply close the device
+ node.
++2. PM QoS per-device latency framework
++
++For each device a list of performance requests is maintained along with
++an aggregated target value.  The aggregated target value is updated with
++changes to the request list or elements of the list.  Typically the
++aggregated target value is simply the max or min of the request values held
++in the parameter list elements.
++Note: the aggregated target value is implemented as an atomic variable so that
++reading the aggregated value does not require any locking mechanism.
++
++
++From kernel mode the use of this interface is the following:
++
++int dev_pm_qos_add_request(device, handle, value):
++Will insert an element into the list for that identified device with the
++target value.  Upon change to this list the new target is recomputed and any
++registered notifiers are called only if the target value is now different.
++Clients of dev_pm_qos need to save the handle for future use in other
++dev_pm_qos API functions.
++
++int dev_pm_qos_update_request(handle, new_value):
++Will update the list element pointed to by the handle with the new target value
++and recompute the new aggregated target, calling the notification trees if the
++target is changed.
++
++int dev_pm_qos_remove_request(handle):
++Will remove the element.  After removal it will update the aggregate target and
++call the notification trees if the target was changed as a result of removing
++the request.
++
++s32 dev_pm_qos_read_value(device):
++Returns the aggregated value for a given device's constraints list.
++
++
++Notification mechanisms:
++The per-device PM QoS framework has 2 different and distinct notification trees:
++a per-device notification tree and a global notification tree.
++
++int dev_pm_qos_add_notifier(device, notifier):
++Adds a notification callback function for the device.
++The callback is called when the aggregated value of the device constraints list
++is changed.
++
++int dev_pm_qos_remove_notifier(device, notifier):
++Removes the notification callback function for the device.
++
++int dev_pm_qos_add_global_notifier(notifier):
++Adds a notification callback function in the global notification tree of the
++framework.
++The callback is called when the aggregated value for any device is changed.
++
++int dev_pm_qos_remove_global_notifier(notifier):
++Removes the notification callback function from the global notification tree
++of the framework.
++
++
++From user mode:
++No API for user space access to the per-device latency constraints is provided
++yet - still under discussion.
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0076-regulator-Fix-some-bitrot-in-the-machine-driver-docu.patch b/patches.runtime_pm/0076-regulator-Fix-some-bitrot-in-the-machine-driver-docu.patch
new file mode 100644 (file)
index 0000000..55d13f6
--- /dev/null
@@ -0,0 +1,51 @@
+From cb3658082b9edebeb0a05f977b1098b1d96e8208 Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Date: Thu, 8 Sep 2011 10:37:31 -0700
+Subject: regulator: Fix some bitrot in the machine driver documentation
+
+The documentation for the machine driver was rather badly bitrotted,
+using pointers to struct device rather than dev_name() to hook up the
+consumers. Update to use dev_name().
+
+Reported-by: Philip Rakity <prakity@marvell.com>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+(cherry picked from commit 2c1ba398ac9da3305815f6ae8e95ae2b9fd3b5ff)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/regulator/machine.txt |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
+index b42419b..311c61f 100644
+--- a/Documentation/power/regulator/machine.txt
++++ b/Documentation/power/regulator/machine.txt
+@@ -16,7 +16,7 @@ initialisation code by creating a struct regulator_consumer_supply for
+ each regulator.
+ struct regulator_consumer_supply {
+-      struct device *dev;     /* consumer */
++      const char *dev_name;   /* consumer dev_name() */
+       const char *supply;     /* consumer supply - e.g. "vcc" */
+ };
+@@ -24,13 +24,13 @@ e.g. for the machine above
+ static struct regulator_consumer_supply regulator1_consumers[] = {
+ {
+-      .dev    = &platform_consumerB_device.dev,
+-      .supply = "Vcc",
++      .dev_name       = "dev_name(consumer B)",
++      .supply         = "Vcc",
+ },};
+ static struct regulator_consumer_supply regulator2_consumers[] = {
+ {
+-      .dev    = &platform_consumerA_device.dev,
++      .dev    = "dev_name(consumer A"),
+       .supply = "Vcc",
+ },};
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0077-regulator-Clarify-documentation-for-regulator-regula.patch b/patches.runtime_pm/0077-regulator-Clarify-documentation-for-regulator-regula.patch
new file mode 100644 (file)
index 0000000..9d5c1d8
--- /dev/null
@@ -0,0 +1,55 @@
+From eeac24a0edf936c15f1a4c344c98206ac442e74b Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Date: Thu, 8 Sep 2011 10:38:59 -0700
+Subject: regulator: Clarify documentation for regulator-regulator supplies
+
+The mechanism used for connecting regulators together when one regulator
+supplies another wasn't clear as the names being used weren't really tied
+together well.
+
+Reported-by: Philip Rakity <prakity@marvell.com>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+(cherry picked from commit c3035a232e87f42b81d8ece1980abd0a2f26d792)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/regulator/machine.txt |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
+index 311c61f..ce63af0 100644
+--- a/Documentation/power/regulator/machine.txt
++++ b/Documentation/power/regulator/machine.txt
+@@ -43,6 +43,7 @@ to their supply regulator :-
+ static struct regulator_init_data regulator1_data = {
+       .constraints = {
++              .name = "Regulator-1",
+               .min_uV = 3300000,
+               .max_uV = 3300000,
+               .valid_modes_mask = REGULATOR_MODE_NORMAL,
+@@ -51,13 +52,19 @@ static struct regulator_init_data regulator1_data = {
+       .consumer_supplies = regulator1_consumers,
+ };
++The name field should be set to something that is usefully descriptive
++for the board for configuration of supplies for other regulators and
++for use in logging and other diagnostic output.  Normally the name
++used for the supply rail in the schematic is a good choice.  If no
++name is provided then the subsystem will choose one.
++
+ Regulator-1 supplies power to Regulator-2. This relationship must be registered
+ with the core so that Regulator-1 is also enabled when Consumer A enables its
+ supply (Regulator-2). The supply regulator is set by the supply_regulator
+-field below:-
++field below and co:-
+ static struct regulator_init_data regulator2_data = {
+-      .supply_regulator = "regulator_name",
++      .supply_regulator = "Regulator-1",
+       .constraints = {
+               .min_uV = 1800000,
+               .max_uV = 2000000,
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0078-PM-Runtime-Update-document-about-callbacks.patch b/patches.runtime_pm/0078-PM-Runtime-Update-document-about-callbacks.patch
new file mode 100644 (file)
index 0000000..d0e079c
--- /dev/null
@@ -0,0 +1,57 @@
+From d373f46b94d651111175a233540950c8b43175b5 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Sun, 9 Oct 2011 11:40:25 +0800
+Subject: PM / Runtime: Update document about callbacks
+
+Support for device power domains has been introduced in
+commit 9659cc0678b954f187290c6e8b247a673c5d37e1 (PM: Make
+system-wide PM and runtime PM treat subsystems consistently),
+also power domain callbacks will take precedence over subsystem ones
+from commit 4d27e9dcff00a6425d779b065ec8892e4f391661(PM: Make
+power domain callbacks take precedence over subsystem ones).
+
+So update part of "Device Runtime PM Callbacks" in
+Documentation/power/runtime_pm.txt.
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 2fb242adcaab5defa2f208775ac4f181ac998fdd)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 1f05404..0e85608 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -43,13 +43,18 @@ struct dev_pm_ops {
+       ...
+ };
+-The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are
+-executed by the PM core for either the device type, or the class (if the device
+-type's struct dev_pm_ops object does not exist), or the bus type (if the
+-device type's and class' struct dev_pm_ops objects do not exist) of the given
+-device (this allows device types to override callbacks provided by bus types or
+-classes if necessary).  The bus type, device type and class callbacks are
+-referred to as subsystem-level callbacks in what follows.
++The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
++are executed by the PM core for either the power domain, or the device type
++(if the device power domain's struct dev_pm_ops does not exist), or the class
++(if the device power domain's and type's struct dev_pm_ops object does not
++exist), or the bus type (if the device power domain's, type's and class'
++struct dev_pm_ops objects do not exist) of the given device, so the priority
++order of callbacks from high to low is that power domain callbacks, device
++type callbacks, class callbacks and bus type callbacks, and the high priority
++one will take precedence over low priority one. The bus type, device type and
++class callbacks are referred to as subsystem-level callbacks in what follows,
++and generally speaking, the power domain callbacks are used for representing
++power domains within a SoC.
+ By default, the callbacks are always invoked in process context with interrupts
+ enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0079-PM-Runtime-Fix-kerneldoc-comment-for-rpm_suspend.patch b/patches.runtime_pm/0079-PM-Runtime-Fix-kerneldoc-comment-for-rpm_suspend.patch
new file mode 100644 (file)
index 0000000..6801c01
--- /dev/null
@@ -0,0 +1,57 @@
+From 5fd4a40128aef39588491371e45508501db6c3d6 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Wed, 12 Oct 2011 11:53:32 +0800
+Subject: PM / Runtime: Fix kerneldoc comment for rpm_suspend()
+
+This patch fix kerneldoc comments for rpm_suspend():
+
+ - 'Cancel a pending idle notification' should be put before, also
+   should be changed to 'Cancel a pending idle notification,
+   autosuspend or suspend'.
+
+ - idle notification for the device after succeeding suspend has
+   been removed, so update the comment accordingly.
+
+[rjw: Modified the subject and changelog slightly.]
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 47d8f0bac0fda4c15a030f92cd6da6c6bed87459)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |   18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 7a6fb5e..aa23a64 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -286,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+  * @dev: Device to suspend.
+  * @rpmflags: Flag bits.
+  *
+- * Check if the device's runtime PM status allows it to be suspended.  If
+- * another suspend has been started earlier, either return immediately or wait
+- * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
+- * pending idle notification.  If the RPM_ASYNC flag is set then queue a
+- * suspend request; otherwise run the ->runtime_suspend() callback directly.
+- * If a deferred resume was requested while the callback was running then carry
+- * it out; otherwise send an idle notification for the device (if the suspend
+- * failed) or for its parent (if the suspend succeeded).
++ * Check if the device's runtime PM status allows it to be suspended.
++ * Cancel a pending idle notification, autosuspend or suspend. If
++ * another suspend has been started earlier, either return immediately
++ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
++ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
++ * otherwise run the ->runtime_suspend() callback directly. If a deferred
++ * resume was requested while the callback was running then carry it out;
++ * otherwise send an idle notification for its parent (if the suspend
++ * succeeded and both ignore_children of parent->power and irq_safe of
++ * dev->power are not set).
+  *
+  * This function must be called under dev->power.lock with interrupts disabled.
+  */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0080-PM-Runtime-Handle-.runtime_suspend-failure-correctly.patch b/patches.runtime_pm/0080-PM-Runtime-Handle-.runtime_suspend-failure-correctly.patch
new file mode 100644 (file)
index 0000000..4bd0569
--- /dev/null
@@ -0,0 +1,74 @@
+From 62690b36f3600b1838068b1fad62fd97d2323c4f Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Wed, 12 Oct 2011 22:59:33 +0200
+Subject: PM / Runtime: Handle .runtime_suspend() failure correctly
+
+If .runtime_suspend() returns -EAGAIN or -EBUSY, the device should
+still be in ACTIVE state, so it is not necessary to send an idle
+notification to its parent.  If .runtime_suspend() returns other
+fatal failure, it doesn't make sense to send idle notification to
+its parent.
+
+Skip parent idle notification when failure is returned from
+.runtime_suspend() and update comments in rpm_suspend() to reflect
+that change.
+
+[rjw: Modified the subject and changelog slightly.]
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 857b36c7b038ac56a882ee914df93e5985443074)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |   25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index aa23a64..6bb3aaf 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -291,11 +291,11 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+  * another suspend has been started earlier, either return immediately
+  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+- * otherwise run the ->runtime_suspend() callback directly. If a deferred
+- * resume was requested while the callback was running then carry it out;
+- * otherwise send an idle notification for its parent (if the suspend
+- * succeeded and both ignore_children of parent->power and irq_safe of
+- * dev->power are not set).
++ * otherwise run the ->runtime_suspend() callback directly. When
++ * ->runtime_suspend succeeded, if a deferred resume was requested while
++ * the callback was running then carry it out, otherwise send an idle
++ * notification for its parent (if the suspend succeeded and both
++ * ignore_children of parent->power and irq_safe of dev->power are not set).
+  *
+  * This function must be called under dev->power.lock with interrupts disabled.
+  */
+@@ -420,15 +420,16 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+                       dev->power.runtime_error = 0;
+               else
+                       pm_runtime_cancel_pending(dev);
+-      } else {
++              wake_up_all(&dev->power.wait_queue);
++              goto out;
++      }
+  no_callback:
+-              __update_runtime_status(dev, RPM_SUSPENDED);
+-              pm_runtime_deactivate_timer(dev);
++      __update_runtime_status(dev, RPM_SUSPENDED);
++      pm_runtime_deactivate_timer(dev);
+-              if (dev->parent) {
+-                      parent = dev->parent;
+-                      atomic_add_unless(&parent->power.child_count, -1, 0);
+-              }
++      if (dev->parent) {
++              parent = dev->parent;
++              atomic_add_unless(&parent->power.child_count, -1, 0);
+       }
+       wake_up_all(&dev->power.wait_queue);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0081-PM-Suspend-Add-statistics-debugfs-file-for-suspend-t.patch b/patches.runtime_pm/0081-PM-Suspend-Add-statistics-debugfs-file-for-suspend-t.patch
new file mode 100644 (file)
index 0000000..2451cab
--- /dev/null
@@ -0,0 +1,409 @@
+From 43e6e6a4986089766acd4da2030dba32f921f075 Mon Sep 17 00:00:00 2001
+From: ShuoX Liu <shuox.liu@intel.com>
+Date: Wed, 10 Aug 2011 23:01:26 +0200
+Subject: PM / Suspend: Add statistics debugfs file for suspend to RAM
+
+Record S3 failure time about each reason and the latest two failed
+devices' names in S3 progress.
+We can check it through 'suspend_stats' entry in debugfs.
+
+The motivation of the patch:
+
+We are enabling power features on Medfield. Comparing with PC/notebook,
+a mobile enters/exits suspend-2-ram (we call it s3 on Medfield) far
+more frequently. If it can't enter suspend-2-ram in time, the power
+might be used up soon.
+
+We often find sometimes, a device suspend fails. Then, system retries
+s3 over and over again. As display is off, testers and developers
+don't know what happens.
+
+Some testers and developers complain they don't know if system
+tries suspend-2-ram, and what device fails to suspend. They need
+such info for a quick check. The patch adds suspend_stats under
+debugfs for users to check suspend to RAM statistics quickly.
+
+If not using this patch, we have other methods to get info about
+what device fails. One is to turn on  CONFIG_PM_DEBUG, but users
+would get too much info and testers need recompile the system.
+
+In addition, dynamic debug is another good tool to dump debug info.
+But it still doesn't match our utilization scenario closely.
+1) user need write a user space parser to process the syslog output;
+2) Our testing scenario is we leave the mobile for at least hours.
+   Then, check its status. No serial console available during the
+   testing. One is because console would be suspended, and the other
+   is serial console connecting with spi or HSU devices would consume
+   power. These devices are powered off at suspend-2-ram.
+
+Signed-off-by: ShuoX Liu <shuox.liu@intel.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 2a77c46de1e3dace73745015635ebbc648eca69c)
+
+Conflicts:
+
+       kernel/power/suspend.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/basic-pm-debugging.txt |   24 +++++++
+ drivers/base/power/main.c                  |   31 +++++++--
+ include/linux/suspend.h                    |   52 ++++++++++++++
+ kernel/power/main.c                        |  102 ++++++++++++++++++++++++++++
+ kernel/power/suspend.c                     |   17 ++++-
+ 5 files changed, 218 insertions(+), 8 deletions(-)
+
+diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
+index 05a7fe7..40a4c65 100644
+--- a/Documentation/power/basic-pm-debugging.txt
++++ b/Documentation/power/basic-pm-debugging.txt
+@@ -201,3 +201,27 @@ case, you may be able to search for failing drivers by following the procedure
+ analogous to the one described in section 1.  If you find some failing drivers,
+ you will have to unload them every time before an STR transition (ie. before
+ you run s2ram), and please report the problems with them.
++
++There is a debugfs entry which shows the suspend to RAM statistics. Here is an
++example of its output.
++      # mount -t debugfs none /sys/kernel/debug
++      # cat /sys/kernel/debug/suspend_stats
++      success: 20
++      fail: 5
++      failed_freeze: 0
++      failed_prepare: 0
++      failed_suspend: 5
++      failed_suspend_noirq: 0
++      failed_resume: 0
++      failed_resume_noirq: 0
++      failures:
++        last_failed_dev:      alarm
++                              adc
++        last_failed_errno:    -16
++                              -16
++        last_failed_step:     suspend
++                              suspend
++Field success means the success number of suspend to RAM, and field fail means
++the failure number. Others are the failure number of different steps of suspend
++to RAM. suspend_stats just lists the last 2 failed devices, error number and
++failed step of suspend.
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c6291ab..b1b5826 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -46,6 +46,7 @@ LIST_HEAD(dpm_prepared_list);
+ LIST_HEAD(dpm_suspended_list);
+ LIST_HEAD(dpm_noirq_list);
++struct suspend_stats suspend_stats;
+ static DEFINE_MUTEX(dpm_list_mtx);
+ static pm_message_t pm_transition;
+@@ -467,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state)
+               mutex_unlock(&dpm_list_mtx);
+               error = device_resume_noirq(dev, state);
+-              if (error)
++              if (error) {
++                      suspend_stats.failed_resume_noirq++;
++                      dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
++                      dpm_save_failed_dev(dev_name(dev));
+                       pm_dev_err(dev, state, " early", error);
++              }
+               mutex_lock(&dpm_list_mtx);
+               put_device(dev);
+@@ -629,8 +634,12 @@ void dpm_resume(pm_message_t state)
+                       mutex_unlock(&dpm_list_mtx);
+                       error = device_resume(dev, state, false);
+-                      if (error)
++                      if (error) {
++                              suspend_stats.failed_resume++;
++                              dpm_save_failed_step(SUSPEND_RESUME);
++                              dpm_save_failed_dev(dev_name(dev));
+                               pm_dev_err(dev, state, "", error);
++                      }
+                       mutex_lock(&dpm_list_mtx);
+               }
+@@ -805,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state)
+               mutex_lock(&dpm_list_mtx);
+               if (error) {
+                       pm_dev_err(dev, state, " late", error);
++                      suspend_stats.failed_suspend_noirq++;
++                      dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
++                      dpm_save_failed_dev(dev_name(dev));
+                       put_device(dev);
+                       break;
+               }
+@@ -926,8 +938,10 @@ static void async_suspend(void *data, async_cookie_t cookie)
+       int error;
+       error = __device_suspend(dev, pm_transition, true);
+-      if (error)
++      if (error) {
++              dpm_save_failed_dev(dev_name(dev));
+               pm_dev_err(dev, pm_transition, " async", error);
++      }
+       put_device(dev);
+ }
+@@ -970,6 +984,7 @@ int dpm_suspend(pm_message_t state)
+               mutex_lock(&dpm_list_mtx);
+               if (error) {
+                       pm_dev_err(dev, state, "", error);
++                      dpm_save_failed_dev(dev_name(dev));
+                       put_device(dev);
+                       break;
+               }
+@@ -983,7 +998,10 @@ int dpm_suspend(pm_message_t state)
+       async_synchronize_full();
+       if (!error)
+               error = async_error;
+-      if (!error)
++      if (error) {
++              suspend_stats.failed_suspend++;
++              dpm_save_failed_step(SUSPEND_SUSPEND);
++      } else
+               dpm_show_time(starttime, state, NULL);
+       return error;
+ }
+@@ -1091,7 +1109,10 @@ int dpm_suspend_start(pm_message_t state)
+       int error;
+       error = dpm_prepare(state);
+-      if (!error)
++      if (error) {
++              suspend_stats.failed_prepare++;
++              dpm_save_failed_step(SUSPEND_PREPARE);
++      } else
+               error = dpm_suspend(state);
+       return error;
+ }
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index e1e3742..94eb364 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -34,6 +34,58 @@ typedef int __bitwise suspend_state_t;
+ #define PM_SUSPEND_MEM                ((__force suspend_state_t) 3)
+ #define PM_SUSPEND_MAX                ((__force suspend_state_t) 4)
++enum suspend_stat_step {
++      SUSPEND_FREEZE = 1,
++      SUSPEND_PREPARE,
++      SUSPEND_SUSPEND,
++      SUSPEND_SUSPEND_NOIRQ,
++      SUSPEND_RESUME_NOIRQ,
++      SUSPEND_RESUME
++};
++
++struct suspend_stats {
++      int     success;
++      int     fail;
++      int     failed_freeze;
++      int     failed_prepare;
++      int     failed_suspend;
++      int     failed_suspend_noirq;
++      int     failed_resume;
++      int     failed_resume_noirq;
++#define       REC_FAILED_NUM  2
++      int     last_failed_dev;
++      char    failed_devs[REC_FAILED_NUM][40];
++      int     last_failed_errno;
++      int     errno[REC_FAILED_NUM];
++      int     last_failed_step;
++      enum suspend_stat_step  failed_steps[REC_FAILED_NUM];
++};
++
++extern struct suspend_stats suspend_stats;
++
++static inline void dpm_save_failed_dev(const char *name)
++{
++      strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
++              name,
++              sizeof(suspend_stats.failed_devs[0]));
++      suspend_stats.last_failed_dev++;
++      suspend_stats.last_failed_dev %= REC_FAILED_NUM;
++}
++
++static inline void dpm_save_failed_errno(int err)
++{
++      suspend_stats.errno[suspend_stats.last_failed_errno] = err;
++      suspend_stats.last_failed_errno++;
++      suspend_stats.last_failed_errno %= REC_FAILED_NUM;
++}
++
++static inline void dpm_save_failed_step(enum suspend_stat_step step)
++{
++      suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
++      suspend_stats.last_failed_step++;
++      suspend_stats.last_failed_step %= REC_FAILED_NUM;
++}
++
+ /**
+  * struct platform_suspend_ops - Callbacks for managing platform dependent
+  *    system sleep states.
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 6c601f8..2757acb 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -12,6 +12,8 @@
+ #include <linux/string.h>
+ #include <linux/resume-trace.h>
+ #include <linux/workqueue.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
+ #include "power.h"
+@@ -133,6 +135,101 @@ power_attr(pm_test);
+ #endif /* CONFIG_PM_SLEEP */
++#ifdef CONFIG_DEBUG_FS
++static char *suspend_step_name(enum suspend_stat_step step)
++{
++      switch (step) {
++      case SUSPEND_FREEZE:
++              return "freeze";
++      case SUSPEND_PREPARE:
++              return "prepare";
++      case SUSPEND_SUSPEND:
++              return "suspend";
++      case SUSPEND_SUSPEND_NOIRQ:
++              return "suspend_noirq";
++      case SUSPEND_RESUME_NOIRQ:
++              return "resume_noirq";
++      case SUSPEND_RESUME:
++              return "resume";
++      default:
++              return "";
++      }
++}
++
++static int suspend_stats_show(struct seq_file *s, void *unused)
++{
++      int i, index, last_dev, last_errno, last_step;
++
++      last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
++      last_dev %= REC_FAILED_NUM;
++      last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
++      last_errno %= REC_FAILED_NUM;
++      last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
++      last_step %= REC_FAILED_NUM;
++      seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
++                      "%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
++                      "success", suspend_stats.success,
++                      "fail", suspend_stats.fail,
++                      "failed_freeze", suspend_stats.failed_freeze,
++                      "failed_prepare", suspend_stats.failed_prepare,
++                      "failed_suspend", suspend_stats.failed_suspend,
++                      "failed_suspend_noirq",
++                              suspend_stats.failed_suspend_noirq,
++                      "failed_resume", suspend_stats.failed_resume,
++                      "failed_resume_noirq",
++                              suspend_stats.failed_resume_noirq);
++      seq_printf(s,   "failures:\n  last_failed_dev:\t%-s\n",
++                      suspend_stats.failed_devs[last_dev]);
++      for (i = 1; i < REC_FAILED_NUM; i++) {
++              index = last_dev + REC_FAILED_NUM - i;
++              index %= REC_FAILED_NUM;
++              seq_printf(s, "\t\t\t%-s\n",
++                      suspend_stats.failed_devs[index]);
++      }
++      seq_printf(s,   "  last_failed_errno:\t%-d\n",
++                      suspend_stats.errno[last_errno]);
++      for (i = 1; i < REC_FAILED_NUM; i++) {
++              index = last_errno + REC_FAILED_NUM - i;
++              index %= REC_FAILED_NUM;
++              seq_printf(s, "\t\t\t%-d\n",
++                      suspend_stats.errno[index]);
++      }
++      seq_printf(s,   "  last_failed_step:\t%-s\n",
++                      suspend_step_name(
++                              suspend_stats.failed_steps[last_step]));
++      for (i = 1; i < REC_FAILED_NUM; i++) {
++              index = last_step + REC_FAILED_NUM - i;
++              index %= REC_FAILED_NUM;
++              seq_printf(s, "\t\t\t%-s\n",
++                      suspend_step_name(
++                              suspend_stats.failed_steps[index]));
++      }
++
++      return 0;
++}
++
++static int suspend_stats_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, suspend_stats_show, NULL);
++}
++
++static const struct file_operations suspend_stats_operations = {
++      .open           = suspend_stats_open,
++      .read           = seq_read,
++      .llseek         = seq_lseek,
++      .release        = single_release,
++};
++
++static int __init pm_debugfs_init(void)
++{
++      debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
++                      NULL, NULL, &suspend_stats_operations);
++      return 0;
++}
++
++late_initcall(pm_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
++
+ struct kobject *power_kobj;
+ /**
+@@ -194,6 +291,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+       }
+       if (state < PM_SUSPEND_MAX && *s)
+               error = enter_state(state);
++              if (error) {
++                      suspend_stats.fail++;
++                      dpm_save_failed_errno(error);
++              } else
++                      suspend_stats.success++;
+ #endif
+  Exit:
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index d3caa76..fdd4263 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -104,7 +104,10 @@ static int suspend_prepare(void)
+               goto Finish;
+       error = suspend_freeze_processes();
+-      if (!error)
++      if (error) {
++              suspend_stats.failed_freeze++;
++              dpm_save_failed_step(SUSPEND_FREEZE);
++      } else
+               return 0;
+       suspend_thaw_processes();
+@@ -315,8 +318,16 @@ int enter_state(suspend_state_t state)
+  */
+ int pm_suspend(suspend_state_t state)
+ {
+-      if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
+-              return enter_state(state);
++      int ret;
++      if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) {
++              ret = enter_state(state);
++              if (ret) {
++                      suspend_stats.fail++;
++                      dpm_save_failed_errno(ret);
++              } else
++                      suspend_stats.success++;
++              return ret;
++      }
+       return -EINVAL;
+ }
+ EXPORT_SYMBOL(pm_suspend);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0082-PM-Fix-build-issue-in-main.c-for-CONFIG_PM_SLEEP-uns.patch b/patches.runtime_pm/0082-PM-Fix-build-issue-in-main.c-for-CONFIG_PM_SLEEP-uns.patch
new file mode 100644 (file)
index 0000000..0990b53
--- /dev/null
@@ -0,0 +1,41 @@
+From 270cf1054540fd12aaa6fbe9cbc4b9f6cc7f9698 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 11 Aug 2011 22:38:12 +0200
+Subject: PM: Fix build issue in main.c for CONFIG_PM_SLEEP unset
+
+Suspend statistics should depend on CONFIG_PM_SLEEP, so make that
+happen.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit ca123102f69fb260221502ade9bbc069290fae84)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/main.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 2757acb..a52e884 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -133,8 +133,6 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
+ power_attr(pm_test);
+ #endif /* CONFIG_PM_DEBUG */
+-#endif /* CONFIG_PM_SLEEP */
+-
+ #ifdef CONFIG_DEBUG_FS
+ static char *suspend_step_name(enum suspend_stat_step step)
+ {
+@@ -230,6 +228,8 @@ static int __init pm_debugfs_init(void)
+ late_initcall(pm_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
++#endif /* CONFIG_PM_SLEEP */
++
+ struct kobject *power_kobj;
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0083-PM-Hibernate-Include-storage-keys-in-hibernation-ima.patch b/patches.runtime_pm/0083-PM-Hibernate-Include-storage-keys-in-hibernation-ima.patch
new file mode 100644 (file)
index 0000000..2444b83
--- /dev/null
@@ -0,0 +1,302 @@
+From e17f661192c2c472eef4e73b76206f3afc129f16 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 17 Aug 2011 20:42:24 +0200
+Subject: PM / Hibernate: Include storage keys in hibernation image on s390
+
+For s390 there is one additional byte associated with each page,
+the storage key. This byte contains the referenced and changed
+bits and needs to be included into the hibernation image.
+If the storage keys are not restored to their previous state all
+original pages would appear to be dirty. This can cause
+inconsistencies e.g. with read-only filesystems.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 85055dd805f0822f13f736bee2a521e222c38293)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ arch/s390/Kconfig               |    1 
+ arch/s390/kernel/suspend.c      |  118 ++++++++++++++++++++++++++++++++++++++++
+ arch/s390/kernel/swsusp_asm64.S |    3 +
+ include/linux/suspend.h         |   34 +++++++++++
+ kernel/power/Kconfig            |    3 +
+ kernel/power/snapshot.c         |   18 ++++++
+ 6 files changed, 177 insertions(+)
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -89,6 +89,7 @@ config S390
+       select HAVE_GET_USER_PAGES_FAST
+       select HAVE_ARCH_MUTEX_CPU_RELAX
+       select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
++      select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+       select ARCH_INLINE_SPIN_TRYLOCK
+       select ARCH_INLINE_SPIN_TRYLOCK_BH
+       select ARCH_INLINE_SPIN_LOCK
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -7,6 +7,7 @@
+  */
+ #include <linux/pfn.h>
++#include <linux/mm.h>
+ #include <asm/system.h>
+ /*
+@@ -14,6 +15,123 @@
+  */
+ extern const void __nosave_begin, __nosave_end;
++/*
++ * The restore of the saved pages in an hibernation image will set
++ * the change and referenced bits in the storage key for each page.
++ * Overindication of the referenced bits after an hibernation cycle
++ * does not cause any harm but the overindication of the change bits
++ * would cause trouble.
++ * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
++ * page to the most significant byte of the associated page frame
++ * number in the hibernation image.
++ */
++
++/*
++ * Key storage is allocated as a linked list of pages.
++ * The size of the keys array is (PAGE_SIZE - sizeof(long))
++ */
++struct page_key_data {
++      struct page_key_data *next;
++      unsigned char data[];
++};
++
++#define PAGE_KEY_DATA_SIZE    (PAGE_SIZE - sizeof(struct page_key_data *))
++
++static struct page_key_data *page_key_data;
++static struct page_key_data *page_key_rp, *page_key_wp;
++static unsigned long page_key_rx, page_key_wx;
++
++/*
++ * For each page in the hibernation image one additional byte is
++ * stored in the most significant byte of the page frame number.
++ * On suspend no additional memory is required but on resume the
++ * keys need to be memorized until the page data has been restored.
++ * Only then can the storage keys be set to their old state.
++ */
++unsigned long page_key_additional_pages(unsigned long pages)
++{
++      return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
++}
++
++/*
++ * Free page_key_data list of arrays.
++ */
++void page_key_free(void)
++{
++      struct page_key_data *pkd;
++
++      while (page_key_data) {
++              pkd = page_key_data;
++              page_key_data = pkd->next;
++              free_page((unsigned long) pkd);
++      }
++}
++
++/*
++ * Allocate page_key_data list of arrays with enough room to store
++ * one byte for each page in the hibernation image.
++ */
++int page_key_alloc(unsigned long pages)
++{
++      struct page_key_data *pk;
++      unsigned long size;
++
++      size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
++      while (size--) {
++              pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
++              if (!pk) {
++                      page_key_free();
++                      return -ENOMEM;
++              }
++              pk->next = page_key_data;
++              page_key_data = pk;
++      }
++      page_key_rp = page_key_wp = page_key_data;
++      page_key_rx = page_key_wx = 0;
++      return 0;
++}
++
++/*
++ * Save the storage key into the upper 8 bits of the page frame number.
++ */
++void page_key_read(unsigned long *pfn)
++{
++      unsigned long addr;
++
++      addr = (unsigned long) page_address(pfn_to_page(*pfn));
++      *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
++}
++
++/*
++ * Extract the storage key from the upper 8 bits of the page frame number
++ * and store it in the page_key_data list of arrays.
++ */
++void page_key_memorize(unsigned long *pfn)
++{
++      page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
++      *(unsigned char *) pfn = 0;
++      if (++page_key_wx < PAGE_KEY_DATA_SIZE)
++              return;
++      page_key_wp = page_key_wp->next;
++      page_key_wx = 0;
++}
++
++/*
++ * Get the next key from the page_key_data list of arrays and set the
++ * storage key of the page referred by @address. If @address refers to
++ * a "safe" page the swsusp_arch_resume code will transfer the storage
++ * key from the buffer page to the original page.
++ */
++void page_key_write(void *address)
++{
++      page_set_storage_key((unsigned long) address,
++                           page_key_rp->data[page_key_rx], 0);
++      if (++page_key_rx >= PAGE_KEY_DATA_SIZE)
++              return;
++      page_key_rp = page_key_rp->next;
++      page_key_rx = 0;
++}
++
+ int pfn_is_nosave(unsigned long pfn)
+ {
+       unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+--- a/arch/s390/kernel/swsusp_asm64.S
++++ b/arch/s390/kernel/swsusp_asm64.S
+@@ -138,11 +138,14 @@ swsusp_arch_resume:
+ 0:
+       lg      %r2,8(%r1)
+       lg      %r4,0(%r1)
++      iske    %r0,%r4
+       lghi    %r3,PAGE_SIZE
+       lghi    %r5,PAGE_SIZE
+ 1:
+       mvcle   %r2,%r4,0
+       jo      1b
++      lg      %r2,8(%r1)
++      sske    %r0,%r2
+       lg      %r1,16(%r1)
+       ltgr    %r1,%r1
+       jnz     0b
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -378,4 +378,38 @@ static inline void unlock_system_sleep(v
+ }
+ #endif
++#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
++/*
++ * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
++ * to save/restore additional information to/from the array of page
++ * frame numbers in the hibernation image. For s390 this is used to
++ * save and restore the storage key for each page that is included
++ * in the hibernation image.
++ */
++unsigned long page_key_additional_pages(unsigned long pages);
++int page_key_alloc(unsigned long pages);
++void page_key_free(void);
++void page_key_read(unsigned long *pfn);
++void page_key_memorize(unsigned long *pfn);
++void page_key_write(void *address);
++
++#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
++
++static inline unsigned long page_key_additional_pages(unsigned long pages)
++{
++      return 0;
++}
++
++static inline int  page_key_alloc(unsigned long pages)
++{
++      return 0;
++}
++
++static inline void page_key_free(void) {}
++static inline void page_key_read(unsigned long *pfn) {}
++static inline void page_key_memorize(unsigned long *pfn) {}
++static inline void page_key_write(void *address) {}
++
++#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
++
+ #endif /* _LINUX_SUSPEND_H */
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -65,6 +65,9 @@ config HIBERNATION
+         For more information take a look at <file:Documentation/power/swsusp.txt>.
++config ARCH_SAVE_PAGE_KEYS
++      bool
++
+ config PM_STD_PARTITION
+       string "Default resume partition"
+       depends on HIBERNATION
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1339,6 +1339,9 @@ int hibernate_preallocate_memory(void)
+       count += highmem;
+       count -= totalreserve_pages;
++      /* Add number of pages required for page keys (s390 only). */
++      size += page_key_additional_pages(saveable);
++
+       /* Compute the maximum number of saveable pages to leave in memory. */
+       max_size = (count - (size + PAGES_FOR_IO)) / 2
+                       - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
+@@ -1662,6 +1665,8 @@ pack_pfns(unsigned long *buf, struct mem
+               buf[j] = memory_bm_next_pfn(bm);
+               if (unlikely(buf[j] == BM_END_OF_MAP))
+                       break;
++              /* Save page key for data page (s390 only). */
++              page_key_read(buf + j);
+       }
+ }
+@@ -1821,6 +1826,9 @@ static int unpack_orig_pfns(unsigned lon
+               if (unlikely(buf[j] == BM_END_OF_MAP))
+                       break;
++              /* Extract and buffer page key for data page (s390 only). */
++              page_key_memorize(buf + j);
++
+               if (memory_bm_pfn_present(bm, buf[j]))
+                       memory_bm_set_bit(bm, buf[j]);
+               else
+@@ -2223,6 +2231,11 @@ int snapshot_write_next(struct snapshot_
+               if (error)
+                       return error;
++              /* Allocate buffer for page keys. */
++              error = page_key_alloc(nr_copy_pages);
++              if (error)
++                      return error;
++
+       } else if (handle->cur <= nr_meta_pages + 1) {
+               error = unpack_orig_pfns(buffer, &copy_bm);
+               if (error)
+@@ -2243,6 +2256,8 @@ int snapshot_write_next(struct snapshot_
+               }
+       } else {
+               copy_last_highmem_page();
++              /* Restore page key for data page (s390 only). */
++              page_key_write(handle->buffer);
+               handle->buffer = get_buffer(&orig_bm, &ca);
+               if (IS_ERR(handle->buffer))
+                       return PTR_ERR(handle->buffer);
+@@ -2264,6 +2279,9 @@ int snapshot_write_next(struct snapshot_
+ void snapshot_write_finalize(struct snapshot_handle *handle)
+ {
+       copy_last_highmem_page();
++      /* Restore page key for data page (s390 only). */
++      page_key_write(handle->buffer);
++      page_key_free();
+       /* Free only if we have loaded the image entirely */
+       if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
+               memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
diff --git a/patches.runtime_pm/0084-PM-VT-Cleanup-if-defined-uglyness-and-fix-compile-er.patch b/patches.runtime_pm/0084-PM-VT-Cleanup-if-defined-uglyness-and-fix-compile-er.patch
new file mode 100644 (file)
index 0000000..7d1b193
--- /dev/null
@@ -0,0 +1,114 @@
+From 0cc855c7df3330e33ea47b560bd707c4808a98b0 Mon Sep 17 00:00:00 2001
+From: H Hartley Sweeten <hartleys@visionengravers.com>
+Date: Wed, 21 Sep 2011 22:47:55 +0200
+Subject: PM / VT: Cleanup #if defined uglyness and fix compile error
+
+Introduce the config option CONFIG_VT_CONSOLE_SLEEP in order to cleanup
+the #if defined ugliness for the vt suspend support functions. Note that
+CONFIG_VT_CONSOLE is already dependant on CONFIG_VT.
+
+The function pm_set_vt_switch is actually dependant on CONFIG_VT and not
+CONFIG_PM_SLEEP. This fixes a compile error when CONFIG_PM_SLEEP is
+not set:
+
+drivers/tty/vt/vt_ioctl.c:1794: error: redefinition of 'pm_set_vt_switch'
+include/linux/suspend.h:17: error: previous definition of 'pm_set_vt_switch' was here
+
+Also, remove the incorrect path from the comment in console.c.
+
+[rjw: Replaced #if defined() with #ifdef in suspend.h.]
+
+Signed-off-by: H Hartley Sweeten <hsweeten@visionengravers.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 37cce26b32142f09a8967f6d238178af654b20de)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/tty/Kconfig     |    4 ++++
+ include/linux/suspend.h |    9 ++++++---
+ kernel/power/Makefile   |    2 +-
+ kernel/power/console.c  |    4 +---
+ 4 files changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
+index bd7cc05..a7188a0 100644
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -60,6 +60,10 @@ config VT_CONSOLE
+         If unsure, say Y.
++config VT_CONSOLE_SLEEP
++      def_bool y
++      depends on VT_CONSOLE && PM_SLEEP
++
+ config HW_CONSOLE
+       bool
+       depends on VT && !S390 && !UML
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index 720a465..c08069d 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -8,15 +8,18 @@
+ #include <linux/mm.h>
+ #include <asm/errno.h>
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
++#ifdef CONFIG_VT
+ extern void pm_set_vt_switch(int);
+-extern int pm_prepare_console(void);
+-extern void pm_restore_console(void);
+ #else
+ static inline void pm_set_vt_switch(int do_switch)
+ {
+ }
++#endif
++#ifdef CONFIG_VT_CONSOLE_SLEEP
++extern int pm_prepare_console(void);
++extern void pm_restore_console(void);
++#else
+ static inline int pm_prepare_console(void)
+ {
+       return 0;
+diff --git a/kernel/power/Makefile b/kernel/power/Makefile
+index ad6bdd8..07e0e28 100644
+--- a/kernel/power/Makefile
++++ b/kernel/power/Makefile
+@@ -2,7 +2,7 @@
+ ccflags-$(CONFIG_PM_DEBUG)    := -DDEBUG
+ obj-$(CONFIG_PM)              += main.o qos.o
+-obj-$(CONFIG_PM_SLEEP)                += console.o
++obj-$(CONFIG_VT_CONSOLE_SLEEP)        += console.o
+ obj-$(CONFIG_FREEZER)         += process.o
+ obj-$(CONFIG_SUSPEND)         += suspend.o
+ obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
+diff --git a/kernel/power/console.c b/kernel/power/console.c
+index 218e5af..b1dc456 100644
+--- a/kernel/power/console.c
++++ b/kernel/power/console.c
+@@ -1,5 +1,5 @@
+ /*
+- * drivers/power/process.c - Functions for saving/restoring console.
++ * Functions for saving/restoring console.
+  *
+  * Originally from swsusp.
+  */
+@@ -10,7 +10,6 @@
+ #include <linux/module.h>
+ #include "power.h"
+-#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+ #define SUSPEND_CONSOLE       (MAX_NR_CONSOLES-1)
+ static int orig_fgconsole, orig_kmsg;
+@@ -32,4 +31,3 @@ void pm_restore_console(void)
+               vt_kmsg_redirect(orig_kmsg);
+       }
+ }
+-#endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0085-PM-Update-the-policy-on-default-wakeup-settings.patch b/patches.runtime_pm/0085-PM-Update-the-policy-on-default-wakeup-settings.patch
new file mode 100644 (file)
index 0000000..ef49491
--- /dev/null
@@ -0,0 +1,52 @@
+From 6d18c014d73dbd7c10590cea93e587bf8adeef2b Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Mon, 26 Sep 2011 17:38:50 +0200
+Subject: PM: Update the policy on default wakeup settings
+
+This patch (as1485) documents a change to the kernel's default wakeup
+policy.  Devices that forward wakeup requests between buses should be
+enabled for wakeup by default.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 8f88893c05f2f677f18f2ce5591b4bed5d4a7535)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt |    4 +++-
+ drivers/base/power/wakeup.c     |    4 +++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 3384d59..29b7a98 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -152,7 +152,9 @@ try to use its wakeup mechanism.  device_set_wakeup_enable() affects this flag;
+ for the most part drivers should not change its value.  The initial value of
+ should_wakeup is supposed to be false for the majority of devices; the major
+ exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
+-(wake-on-LAN) feature has been set up with ethtool.
++(wake-on-LAN) feature has been set up with ethtool.  It should also default
++to true for devices that don't generate wakeup requests on their own but merely
++forward wakeup requests from one bus to another (like PCI bridges).
+ Whether or not a device is capable of issuing wakeup events is a hardware
+ matter, and the kernel is responsible for keeping track of it.  By contrast,
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 84f7c7d..14ee07e 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+  *
+  * By default, most devices should leave wakeup disabled.  The exceptions are
+  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+- * possibly network interfaces, etc.
++ * possibly network interfaces, etc.  Also, devices that don't generate their
++ * own wakeup requests but merely forward requests from one bus to another
++ * (like PCI bridges) should have wakeup enabled by default.
+  */
+ int device_init_wakeup(struct device *dev, bool enable)
+ {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0086-PM-Hibernate-Freeze-kernel-threads-after-preallocati.patch b/patches.runtime_pm/0086-PM-Hibernate-Freeze-kernel-threads-after-preallocati.patch
new file mode 100644 (file)
index 0000000..1993d3a
--- /dev/null
@@ -0,0 +1,171 @@
+From c187bba1e356ed875f2433fb19e40ef1989f2ac9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 26 Sep 2011 20:32:27 +0200
+Subject: PM / Hibernate: Freeze kernel threads after preallocating memory
+
+There is a problem with the current ordering of hibernate code which
+leads to deadlocks in some filesystems' memory shrinkers.  Namely,
+some filesystems use freezable kernel threads that are inactive when
+the hibernate memory preallocation is carried out.  Those same
+filesystems use memory shrinkers that may be triggered by the
+hibernate memory preallocation.  If those memory shrinkers wait for
+the frozen kernel threads, the hibernate process deadlocks (this
+happens with XFS, for one example).
+
+Apparently, it is not technically viable to redesign the filesystems
+in question to avoid the situation described above, so the only
+possible solution of this issue is to defer the freezing of kernel
+threads until the hibernate memory preallocation is done, which is
+implemented by this change.
+
+Unfortunately, this requires the memory preallocation to be done
+before the "prepare" stage of device freeze, so after this change the
+only way drivers can allocate additional memory for their freeze
+routines in a clean way is to use PM notifiers.
+
+Reported-by: Christoph <cr2005@u-club.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 2aede851ddf08666f68ffc17be446420e9d2a056)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt |    4 ----
+ include/linux/freezer.h         |    4 +++-
+ kernel/power/hibernate.c        |   12 ++++++++----
+ kernel/power/power.h            |    3 ++-
+ kernel/power/process.c          |   30 ++++++++++++++++++++----------
+ 5 files changed, 33 insertions(+), 20 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 29b7a98..646a89e 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -281,10 +281,6 @@ When the system goes into the standby or memory sleep state, the phases are:
+       time.)  Unlike the other suspend-related phases, during the prepare
+       phase the device tree is traversed top-down.
+-      In addition to that, if device drivers need to allocate additional
+-      memory to be able to hadle device suspend correctly, that should be
+-      done in the prepare phase.
+-
+       After the prepare callback method returns, no new children may be
+       registered below the device.  The method may also prepare the device or
+       driver in some way for the upcoming system power transition (for
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 1effc8b..aa56cf3 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -49,6 +49,7 @@ extern int thaw_process(struct task_struct *p);
+ extern void refrigerator(void);
+ extern int freeze_processes(void);
++extern int freeze_kernel_threads(void);
+ extern void thaw_processes(void);
+ static inline int try_to_freeze(void)
+@@ -171,7 +172,8 @@ static inline void clear_freeze_flag(struct task_struct *p) {}
+ static inline int thaw_process(struct task_struct *p) { return 1; }
+ static inline void refrigerator(void) {}
+-static inline int freeze_processes(void) { BUG(); return 0; }
++static inline int freeze_processes(void) { return -ENOSYS; }
++static inline int freeze_kernel_threads(void) { return -ENOSYS; }
+ static inline void thaw_processes(void) {}
+ static inline int try_to_freeze(void) { return 0; }
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 8884c27..878218e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -334,13 +334,17 @@ int hibernation_snapshot(int platform_mode)
+       if (error)
+               goto Close;
+-      error = dpm_prepare(PMSG_FREEZE);
+-      if (error)
+-              goto Complete_devices;
+-
+       /* Preallocate image memory before shutting down devices. */
+       error = hibernate_preallocate_memory();
+       if (error)
++              goto Close;
++
++      error = freeze_kernel_threads();
++      if (error)
++              goto Close;
++
++      error = dpm_prepare(PMSG_FREEZE);
++      if (error)
+               goto Complete_devices;
+       suspend_console();
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 9a00a0a..e620639 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -228,7 +228,8 @@ extern int pm_test_level;
+ #ifdef CONFIG_SUSPEND_FREEZER
+ static inline int suspend_freeze_processes(void)
+ {
+-      return freeze_processes();
++      int error = freeze_processes();
++      return error ? : freeze_kernel_threads();
+ }
+ static inline void suspend_thaw_processes(void)
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 0cf3a27..addbbe5 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -135,7 +135,7 @@ static int try_to_freeze_tasks(bool sig_only)
+ }
+ /**
+- *    freeze_processes - tell processes to enter the refrigerator
++ * freeze_processes - Signal user space processes to enter the refrigerator.
+  */
+ int freeze_processes(void)
+ {
+@@ -143,20 +143,30 @@ int freeze_processes(void)
+       printk("Freezing user space processes ... ");
+       error = try_to_freeze_tasks(true);
+-      if (error)
+-              goto Exit;
+-      printk("done.\n");
++      if (!error) {
++              printk("done.");
++              oom_killer_disable();
++      }
++      printk("\n");
++      BUG_ON(in_atomic());
++
++      return error;
++}
++
++/**
++ * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
++ */
++int freeze_kernel_threads(void)
++{
++      int error;
+       printk("Freezing remaining freezable tasks ... ");
+       error = try_to_freeze_tasks(false);
+-      if (error)
+-              goto Exit;
+-      printk("done.");
++      if (!error)
++              printk("done.");
+-      oom_killer_disable();
+- Exit:
+-      BUG_ON(in_atomic());
+       printk("\n");
++      BUG_ON(in_atomic());
+       return error;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0087-PM-Hibernate-Fix-typo-in-a-kerneldoc-comment.patch b/patches.runtime_pm/0087-PM-Hibernate-Fix-typo-in-a-kerneldoc-comment.patch
new file mode 100644 (file)
index 0000000..b85e4b2
--- /dev/null
@@ -0,0 +1,35 @@
+From faa4d9443e70182fb222a45a878f209de0d974a3 Mon Sep 17 00:00:00 2001
+From: Barry Song <Baohua.Song@csr.com>
+Date: Tue, 27 Sep 2011 22:05:44 +0200
+Subject: PM / Hibernate: Fix typo in a kerneldoc comment
+
+Fix a typo in a function name in the kerneldoc comment next to
+resume_target_kernel().
+
+[rjw: Changed the subject slightly, added the changelog.]
+
+Signed-off-by: Barry Song <Baohua.Song@csr.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 21e82808fc465b66fedaac0f4e885cafb304e843)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 878218e..089ab9c 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -467,7 +467,7 @@ static int resume_target_kernel(bool platform_mode)
+  * @platform_mode: If set, use platform driver to prepare for the transition.
+  *
+  * This routine must be called with pm_mutex held.  If it is successful, control
+- * reappears in the restored target kernel in hibernation_snaphot().
++ * reappears in the restored target kernel in hibernation_snapshot().
+  */
+ int hibernation_restore(int platform_mode)
+ {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0088-PM-Hibernate-Add-resumewait-param-to-support-MMC-lik.patch b/patches.runtime_pm/0088-PM-Hibernate-Add-resumewait-param-to-support-MMC-lik.patch
new file mode 100644 (file)
index 0000000..f550b8b
--- /dev/null
@@ -0,0 +1,101 @@
+From 954a3028e5ffd52c14190a8cbb06948b0d6cbb83 Mon Sep 17 00:00:00 2001
+From: Barry Song <baohua.song@csr.com>
+Date: Thu, 6 Oct 2011 20:34:46 +0200
+Subject: PM / Hibernate: Add resumewait param to support MMC-like devices as
+ resume file
+
+Some devices like MMC are async detected very slow. For example,
+drivers/mmc/host/sdhci.c launches a 200ms delayed work to detect
+MMC partitions then add disk.
+
+We have wait_for_device_probe() and scsi_complete_async_scans()
+before calling swsusp_check(), but it is not enough to wait for MMC.
+
+This patch adds resumewait kernel param just like rootwait so
+that we have enough time to wait until MMC is ready. The difference is
+that we wait for resume partition whereas rootwait waits for rootfs
+partition (which may be on a different device).
+
+This patch will make hibernation support many embedded products
+without SCSI devices, but with devices like MMC.
+
+[rjw: Modified the changelog slightly.]
+
+Signed-off-by: Barry Song <Baohua.Song@csr.com>
+Reviewed-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 6f8d7022a842809aeb24db1d15669198ef02c131)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/kernel-parameters.txt |    4 ++++
+ kernel/power/hibernate.c            |   16 ++++++++++++++++
+ 2 files changed, 20 insertions(+)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index aa47be7..5841804 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2220,6 +2220,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       in <PAGE_SIZE> units (needed only for swap files).
+                       See  Documentation/power/swsusp-and-swap-files.txt
++      resumewait      [HIBERNATION] Wait (indefinitely) for resume device to show up.
++                      Useful for devices that are detected asynchronously
++                      (e.g. USB and MMC devices).
++
+       hibernate=      [HIBERNATION]
+               noresume        Don't check if there's a hibernation image
+                               present during boot.
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 089ab9c..fe4a742 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -14,6 +14,7 @@
+ #include <linux/reboot.h>
+ #include <linux/string.h>
+ #include <linux/device.h>
++#include <linux/async.h>
+ #include <linux/kmod.h>
+ #include <linux/delay.h>
+ #include <linux/fs.h>
+@@ -31,6 +32,7 @@
+ static int nocompress = 0;
+ static int noresume = 0;
++static int resume_wait = 0;
+ static char resume_file[256] = CONFIG_PM_STD_PARTITION;
+ dev_t swsusp_resume_device;
+ sector_t swsusp_resume_block;
+@@ -737,6 +739,13 @@ static int software_resume(void)
+                * to wait for this to finish.
+                */
+               wait_for_device_probe();
++
++              if (resume_wait) {
++                      while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
++                              msleep(10);
++                      async_synchronize_full();
++              }
++
+               /*
+                * We can't depend on SCSI devices being available after loading
+                * one of their modules until scsi_complete_async_scans() is
+@@ -1065,7 +1074,14 @@ static int __init noresume_setup(char *str)
+       return 1;
+ }
++static int __init resumewait_setup(char *str)
++{
++      resume_wait = 1;
++      return 1;
++}
++
+ __setup("noresume", noresume_setup);
+ __setup("resume_offset=", resume_offset_setup);
+ __setup("resume=", resume_setup);
+ __setup("hibernate=", hibernate_setup);
++__setup("resumewait", resumewait_setup);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0089-PM-Hibernate-Add-resumedelay-kernel-param-in-additio.patch b/patches.runtime_pm/0089-PM-Hibernate-Add-resumedelay-kernel-param-in-additio.patch
new file mode 100644 (file)
index 0000000..8835e5a
--- /dev/null
@@ -0,0 +1,82 @@
+From 82e30713de5b155a0665f08dbe84b04deb4c6b75 Mon Sep 17 00:00:00 2001
+From: Barry Song <baohua.song@csr.com>
+Date: Mon, 10 Oct 2011 23:38:41 +0200
+Subject: PM / Hibernate: Add resumedelay kernel param in addition to
+ resumewait
+
+Patch "PM / Hibernate: Add resumewait param to support MMC-like
+devices as resume file" added the resumewait kernel command line
+option.  The present patch adds resumedelay so that
+resumewait/delay were analogous to rootwait/delay.
+
+[rjw: Modified the subject and changelog slightly.]
+
+Signed-off-by: Barry Song <baohua.song@csr.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit f126f7334f72e2fd1b7a62bba20c488b86e6e7c4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/kernel-parameters.txt |    3 +++
+ kernel/power/hibernate.c            |   14 ++++++++++++++
+ 2 files changed, 17 insertions(+)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 5841804..09a5f8a 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2220,6 +2220,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       in <PAGE_SIZE> units (needed only for swap files).
+                       See  Documentation/power/swsusp-and-swap-files.txt
++      resumedelay=    [HIBERNATION] Delay (in seconds) to pause before attempting to
++                      read the resume files
++
+       resumewait      [HIBERNATION] Wait (indefinitely) for resume device to show up.
+                       Useful for devices that are detected asynchronously
+                       (e.g. USB and MMC devices).
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index fe4a742..96477fc 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -33,6 +33,7 @@
+ static int nocompress = 0;
+ static int noresume = 0;
+ static int resume_wait = 0;
++static int resume_delay = 0;
+ static char resume_file[256] = CONFIG_PM_STD_PARTITION;
+ dev_t swsusp_resume_device;
+ sector_t swsusp_resume_block;
+@@ -731,6 +732,12 @@ static int software_resume(void)
+       pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
++      if (resume_delay) {
++              printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
++                      resume_delay);
++              ssleep(resume_delay);
++      }
++
+       /* Check if the device is there */
+       swsusp_resume_device = name_to_dev_t(resume_file);
+       if (!swsusp_resume_device) {
+@@ -1080,8 +1087,15 @@ static int __init resumewait_setup(char *str)
+       return 1;
+ }
++static int __init resumedelay_setup(char *str)
++{
++      resume_delay = simple_strtoul(str, NULL, 0);
++      return 1;
++}
++
+ __setup("noresume", noresume_setup);
+ __setup("resume_offset=", resume_offset_setup);
+ __setup("resume=", resume_setup);
+ __setup("hibernate=", hibernate_setup);
+ __setup("resumewait", resumewait_setup);
++__setup("resumedelay=", resumedelay_setup);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0090-PM-Hibernate-Do-not-initialize-static-and-extern-var.patch b/patches.runtime_pm/0090-PM-Hibernate-Do-not-initialize-static-and-extern-var.patch
new file mode 100644 (file)
index 0000000..03e0900
--- /dev/null
@@ -0,0 +1,46 @@
+From 997dece6e233d0eb8fba1405d18ee4d14d8b1234 Mon Sep 17 00:00:00 2001
+From: Barry Song <Baohua.Song@csr.com>
+Date: Tue, 11 Oct 2011 23:29:18 -0700
+Subject: PM / Hibernate: Do not initialize static and extern variables to 0
+
+Static and extern variables in kernel/power/hibernate.c need not be
+initialized to 0 explicitly, so remove those initializations.
+
+[rjw: Modified subject, added changelog.]
+
+Signed-off-by: Barry Song <Baohua.Song@csr.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit d231ff1af70a2df43d809173cf8c94e9c3beb853)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 96477fc..148564d 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -30,14 +30,14 @@
+ #include "power.h"
+-static int nocompress = 0;
+-static int noresume = 0;
+-static int resume_wait = 0;
+-static int resume_delay = 0;
++static int nocompress;
++static int noresume;
++static int resume_wait;
++static int resume_delay;
+ static char resume_file[256] = CONFIG_PM_STD_PARTITION;
+ dev_t swsusp_resume_device;
+ sector_t swsusp_resume_block;
+-int in_suspend __nosavedata = 0;
++int in_suspend __nosavedata;
+ enum {
+       HIBERNATION_INVALID,
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0091-PM-Hibernate-Improve-performance-of-LZO-plain-hibern.patch b/patches.runtime_pm/0091-PM-Hibernate-Improve-performance-of-LZO-plain-hibern.patch
new file mode 100644 (file)
index 0000000..a3fd97a
--- /dev/null
@@ -0,0 +1,1160 @@
+From bfbfea61bd966d91d4193a3ee63cc6e055cbe936 Mon Sep 17 00:00:00 2001
+From: Bojan Smojver <bojan@rexursive.com>
+Date: Thu, 13 Oct 2011 23:58:07 +0200
+Subject: PM / Hibernate: Improve performance of LZO/plain hibernation,
+ checksum image
+
+Use threads for LZO compression/decompression on hibernate/thaw.
+Improve buffering on hibernate/thaw.
+Calculate/verify CRC32 of the image pages on hibernate/thaw.
+
+In my testing, this improved write/read speed by a factor of about two.
+
+Signed-off-by: Bojan Smojver <bojan@rexursive.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 081a9d043c983f161b78fdc4671324d1342b86bc)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/Kconfig     |    1 +
+ kernel/power/hibernate.c |    3 +
+ kernel/power/power.h     |    1 +
+ kernel/power/swap.c      |  818 ++++++++++++++++++++++++++++++++++++----------
+ 4 files changed, 645 insertions(+), 178 deletions(-)
+
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index 2943e3b..deb5461 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -27,6 +27,7 @@ config HIBERNATION
+       select HIBERNATE_CALLBACKS
+       select LZO_COMPRESS
+       select LZO_DECOMPRESS
++      select CRC32
+       ---help---
+         Enable the suspend to disk (STD) functionality, which is usually
+         called "hibernation" in user interfaces.  STD checkpoints the
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 148564d..bb170c2 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -657,6 +657,9 @@ int hibernate(void)
+                       flags |= SF_PLATFORM_MODE;
+               if (nocompress)
+                       flags |= SF_NOCOMPRESS_MODE;
++              else
++                      flags |= SF_CRC32_MODE;
++
+               pr_debug("PM: writing image.\n");
+               error = swsusp_write(flags);
+               swsusp_free();
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index e620639..23a2db1 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -146,6 +146,7 @@ extern int swsusp_swap_in_use(void);
+  */
+ #define SF_PLATFORM_MODE      1
+ #define SF_NOCOMPRESS_MODE    2
++#define SF_CRC32_MODE         4
+ /* kernel/power/hibernate.c */
+ extern int swsusp_check(void);
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 7c97c3a..11a594c 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -27,6 +27,10 @@
+ #include <linux/slab.h>
+ #include <linux/lzo.h>
+ #include <linux/vmalloc.h>
++#include <linux/cpumask.h>
++#include <linux/atomic.h>
++#include <linux/kthread.h>
++#include <linux/crc32.h>
+ #include "power.h"
+@@ -43,8 +47,7 @@
+  *    allocated and populated one at a time, so we only need one memory
+  *    page to set up the entire structure.
+  *
+- *    During resume we also only need to use one swap_map_page structure
+- *    at a time.
++ *    During resume we pick up all swap_map_page structures into a list.
+  */
+ #define MAP_PAGE_ENTRIES      (PAGE_SIZE / sizeof(sector_t) - 1)
+@@ -54,6 +57,11 @@ struct swap_map_page {
+       sector_t next_swap;
+ };
++struct swap_map_page_list {
++      struct swap_map_page *map;
++      struct swap_map_page_list *next;
++};
++
+ /**
+  *    The swap_map_handle structure is used for handling swap in
+  *    a file-alike way
+@@ -61,13 +69,18 @@ struct swap_map_page {
+ struct swap_map_handle {
+       struct swap_map_page *cur;
++      struct swap_map_page_list *maps;
+       sector_t cur_swap;
+       sector_t first_sector;
+       unsigned int k;
++      unsigned long nr_free_pages, written;
++      u32 crc32;
+ };
+ struct swsusp_header {
+-      char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
++      char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
++                    sizeof(u32)];
++      u32     crc32;
+       sector_t image;
+       unsigned int flags;     /* Flags to pass to the "boot" kernel */
+       char    orig_sig[10];
+@@ -199,6 +212,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
+               memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
+               swsusp_header->image = handle->first_sector;
+               swsusp_header->flags = flags;
++              if (flags & SF_CRC32_MODE)
++                      swsusp_header->crc32 = handle->crc32;
+               error = hib_bio_write_page(swsusp_resume_block,
+                                       swsusp_header, NULL);
+       } else {
+@@ -245,6 +260,7 @@ static int swsusp_swap_check(void)
+ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+ {
+       void *src;
++      int ret;
+       if (!offset)
+               return -ENOSPC;
+@@ -254,9 +270,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+               if (src) {
+                       copy_page(src, buf);
+               } else {
+-                      WARN_ON_ONCE(1);
+-                      bio_chain = NULL;       /* Go synchronous */
+-                      src = buf;
++                      ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
++                      if (ret)
++                              return ret;
++                      src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++                      if (src) {
++                              copy_page(src, buf);
++                      } else {
++                              WARN_ON_ONCE(1);
++                              bio_chain = NULL;       /* Go synchronous */
++                              src = buf;
++                      }
+               }
+       } else {
+               src = buf;
+@@ -293,6 +317,8 @@ static int get_swap_writer(struct swap_map_handle *handle)
+               goto err_rel;
+       }
+       handle->k = 0;
++      handle->nr_free_pages = nr_free_pages() >> 1;
++      handle->written = 0;
+       handle->first_sector = handle->cur_swap;
+       return 0;
+ err_rel:
+@@ -316,20 +342,23 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+               return error;
+       handle->cur->entries[handle->k++] = offset;
+       if (handle->k >= MAP_PAGE_ENTRIES) {
+-              error = hib_wait_on_bio_chain(bio_chain);
+-              if (error)
+-                      goto out;
+               offset = alloc_swapdev_block(root_swap);
+               if (!offset)
+                       return -ENOSPC;
+               handle->cur->next_swap = offset;
+-              error = write_page(handle->cur, handle->cur_swap, NULL);
++              error = write_page(handle->cur, handle->cur_swap, bio_chain);
+               if (error)
+                       goto out;
+               clear_page(handle->cur);
+               handle->cur_swap = offset;
+               handle->k = 0;
+       }
++      if (bio_chain && ++handle->written > handle->nr_free_pages) {
++              error = hib_wait_on_bio_chain(bio_chain);
++              if (error)
++                      goto out;
++              handle->written = 0;
++      }
+  out:
+       return error;
+ }
+@@ -372,6 +401,13 @@ static int swap_writer_finish(struct swap_map_handle *handle,
+                                    LZO_HEADER, PAGE_SIZE)
+ #define LZO_CMP_SIZE  (LZO_CMP_PAGES * PAGE_SIZE)
++/* Maximum number of threads for compression/decompression. */
++#define LZO_THREADS   3
++
++/* Maximum number of pages for read buffering. */
++#define LZO_READ_PAGES        (MAP_PAGE_ENTRIES * 8)
++
++
+ /**
+  *    save_image - save the suspend image data
+  */
+@@ -419,6 +455,92 @@ static int save_image(struct swap_map_handle *handle,
+       return ret;
+ }
++/**
++ * Structure used for CRC32.
++ */
++struct crc_data {
++      struct task_struct *thr;                  /* thread */
++      atomic_t ready;                           /* ready to start flag */
++      atomic_t stop;                            /* ready to stop flag */
++      unsigned run_threads;                     /* nr current threads */
++      wait_queue_head_t go;                     /* start crc update */
++      wait_queue_head_t done;                   /* crc update done */
++      u32 *crc32;                               /* points to handle's crc32 */
++      size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
++      unsigned char *unc[LZO_THREADS];          /* uncompressed data */
++};
++
++/**
++ * CRC32 update function that runs in its own thread.
++ */
++static int crc32_threadfn(void *data)
++{
++      struct crc_data *d = data;
++      unsigned i;
++
++      while (1) {
++              wait_event(d->go, atomic_read(&d->ready) ||
++                                kthread_should_stop());
++              if (kthread_should_stop()) {
++                      d->thr = NULL;
++                      atomic_set(&d->stop, 1);
++                      wake_up(&d->done);
++                      break;
++              }
++              atomic_set(&d->ready, 0);
++
++              for (i = 0; i < d->run_threads; i++)
++                      *d->crc32 = crc32_le(*d->crc32,
++                                           d->unc[i], *d->unc_len[i]);
++              atomic_set(&d->stop, 1);
++              wake_up(&d->done);
++      }
++      return 0;
++}
++/**
++ * Structure used for LZO data compression.
++ */
++struct cmp_data {
++      struct task_struct *thr;                  /* thread */
++      atomic_t ready;                           /* ready to start flag */
++      atomic_t stop;                            /* ready to stop flag */
++      int ret;                                  /* return code */
++      wait_queue_head_t go;                     /* start compression */
++      wait_queue_head_t done;                   /* compression done */
++      size_t unc_len;                           /* uncompressed length */
++      size_t cmp_len;                           /* compressed length */
++      unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
++      unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
++      unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
++};
++
++/**
++ * Compression function that runs in its own thread.
++ */
++static int lzo_compress_threadfn(void *data)
++{
++      struct cmp_data *d = data;
++
++      while (1) {
++              wait_event(d->go, atomic_read(&d->ready) ||
++                                kthread_should_stop());
++              if (kthread_should_stop()) {
++                      d->thr = NULL;
++                      d->ret = -1;
++                      atomic_set(&d->stop, 1);
++                      wake_up(&d->done);
++                      break;
++              }
++              atomic_set(&d->ready, 0);
++
++              d->ret = lzo1x_1_compress(d->unc, d->unc_len,
++                                        d->cmp + LZO_HEADER, &d->cmp_len,
++                                        d->wrk);
++              atomic_set(&d->stop, 1);
++              wake_up(&d->done);
++      }
++      return 0;
++}
+ /**
+  * save_image_lzo - Save the suspend image data compressed with LZO.
+@@ -437,42 +559,93 @@ static int save_image_lzo(struct swap_map_handle *handle,
+       struct bio *bio;
+       struct timeval start;
+       struct timeval stop;
+-      size_t off, unc_len, cmp_len;
+-      unsigned char *unc, *cmp, *wrk, *page;
++      size_t off;
++      unsigned thr, run_threads, nr_threads;
++      unsigned char *page = NULL;
++      struct cmp_data *data = NULL;
++      struct crc_data *crc = NULL;
++
++      /*
++       * We'll limit the number of threads for compression to limit memory
++       * footprint.
++       */
++      nr_threads = num_online_cpus() - 1;
++      nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+       page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+       if (!page) {
+               printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto out_clean;
+       }
+-      wrk = vmalloc(LZO1X_1_MEM_COMPRESS);
+-      if (!wrk) {
+-              printk(KERN_ERR "PM: Failed to allocate LZO workspace\n");
+-              free_page((unsigned long)page);
+-              return -ENOMEM;
++      data = vmalloc(sizeof(*data) * nr_threads);
++      if (!data) {
++              printk(KERN_ERR "PM: Failed to allocate LZO data\n");
++              ret = -ENOMEM;
++              goto out_clean;
+       }
++      for (thr = 0; thr < nr_threads; thr++)
++              memset(&data[thr], 0, offsetof(struct cmp_data, go));
+-      unc = vmalloc(LZO_UNC_SIZE);
+-      if (!unc) {
+-              printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
+-              vfree(wrk);
+-              free_page((unsigned long)page);
+-              return -ENOMEM;
++      crc = kmalloc(sizeof(*crc), GFP_KERNEL);
++      if (!crc) {
++              printk(KERN_ERR "PM: Failed to allocate crc\n");
++              ret = -ENOMEM;
++              goto out_clean;
++      }
++      memset(crc, 0, offsetof(struct crc_data, go));
++
++      /*
++       * Start the compression threads.
++       */
++      for (thr = 0; thr < nr_threads; thr++) {
++              init_waitqueue_head(&data[thr].go);
++              init_waitqueue_head(&data[thr].done);
++
++              data[thr].thr = kthread_run(lzo_compress_threadfn,
++                                          &data[thr],
++                                          "image_compress/%u", thr);
++              if (IS_ERR(data[thr].thr)) {
++                      data[thr].thr = NULL;
++                      printk(KERN_ERR
++                             "PM: Cannot start compression threads\n");
++                      ret = -ENOMEM;
++                      goto out_clean;
++              }
+       }
+-      cmp = vmalloc(LZO_CMP_SIZE);
+-      if (!cmp) {
+-              printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
+-              vfree(unc);
+-              vfree(wrk);
+-              free_page((unsigned long)page);
+-              return -ENOMEM;
++      /*
++       * Adjust number of free pages after all allocations have been done.
++       * We don't want to run out of pages when writing.
++       */
++      handle->nr_free_pages = nr_free_pages() >> 1;
++
++      /*
++       * Start the CRC32 thread.
++       */
++      init_waitqueue_head(&crc->go);
++      init_waitqueue_head(&crc->done);
++
++      handle->crc32 = 0;
++      crc->crc32 = &handle->crc32;
++      for (thr = 0; thr < nr_threads; thr++) {
++              crc->unc[thr] = data[thr].unc;
++              crc->unc_len[thr] = &data[thr].unc_len;
++      }
++
++      crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
++      if (IS_ERR(crc->thr)) {
++              crc->thr = NULL;
++              printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
++              ret = -ENOMEM;
++              goto out_clean;
+       }
+       printk(KERN_INFO
++              "PM: Using %u thread(s) for compression.\n"
+               "PM: Compressing and saving image data (%u pages) ...     ",
+-              nr_to_write);
++              nr_threads, nr_to_write);
+       m = nr_to_write / 100;
+       if (!m)
+               m = 1;
+@@ -480,55 +653,83 @@ static int save_image_lzo(struct swap_map_handle *handle,
+       bio = NULL;
+       do_gettimeofday(&start);
+       for (;;) {
+-              for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
+-                      ret = snapshot_read_next(snapshot);
+-                      if (ret < 0)
+-                              goto out_finish;
+-
+-                      if (!ret)
++              for (thr = 0; thr < nr_threads; thr++) {
++                      for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
++                              ret = snapshot_read_next(snapshot);
++                              if (ret < 0)
++                                      goto out_finish;
++
++                              if (!ret)
++                                      break;
++
++                              memcpy(data[thr].unc + off,
++                                     data_of(*snapshot), PAGE_SIZE);
++
++                              if (!(nr_pages % m))
++                                      printk(KERN_CONT "\b\b\b\b%3d%%",
++                                             nr_pages / m);
++                              nr_pages++;
++                      }
++                      if (!off)
+                               break;
+-                      memcpy(unc + off, data_of(*snapshot), PAGE_SIZE);
++                      data[thr].unc_len = off;
+-                      if (!(nr_pages % m))
+-                              printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
+-                      nr_pages++;
++                      atomic_set(&data[thr].ready, 1);
++                      wake_up(&data[thr].go);
+               }
+-              if (!off)
++              if (!thr)
+                       break;
+-              unc_len = off;
+-              ret = lzo1x_1_compress(unc, unc_len,
+-                                     cmp + LZO_HEADER, &cmp_len, wrk);
+-              if (ret < 0) {
+-                      printk(KERN_ERR "PM: LZO compression failed\n");
+-                      break;
+-              }
++              crc->run_threads = thr;
++              atomic_set(&crc->ready, 1);
++              wake_up(&crc->go);
+-              if (unlikely(!cmp_len ||
+-                           cmp_len > lzo1x_worst_compress(unc_len))) {
+-                      printk(KERN_ERR "PM: Invalid LZO compressed length\n");
+-                      ret = -1;
+-                      break;
+-              }
++              for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
++                      wait_event(data[thr].done,
++                                 atomic_read(&data[thr].stop));
++                      atomic_set(&data[thr].stop, 0);
+-              *(size_t *)cmp = cmp_len;
++                      ret = data[thr].ret;
+-              /*
+-               * Given we are writing one page at a time to disk, we copy
+-               * that much from the buffer, although the last bit will likely
+-               * be smaller than full page. This is OK - we saved the length
+-               * of the compressed data, so any garbage at the end will be
+-               * discarded when we read it.
+-               */
+-              for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
+-                      memcpy(page, cmp + off, PAGE_SIZE);
++                      if (ret < 0) {
++                              printk(KERN_ERR "PM: LZO compression failed\n");
++                              goto out_finish;
++                      }
+-                      ret = swap_write_page(handle, page, &bio);
+-                      if (ret)
++                      if (unlikely(!data[thr].cmp_len ||
++                                   data[thr].cmp_len >
++                                   lzo1x_worst_compress(data[thr].unc_len))) {
++                              printk(KERN_ERR
++                                     "PM: Invalid LZO compressed length\n");
++                              ret = -1;
+                               goto out_finish;
++                      }
++
++                      *(size_t *)data[thr].cmp = data[thr].cmp_len;
++
++                      /*
++                       * Given we are writing one page at a time to disk, we
++                       * copy that much from the buffer, although the last
++                       * bit will likely be smaller than full page. This is
++                       * OK - we saved the length of the compressed data, so
++                       * any garbage at the end will be discarded when we
++                       * read it.
++                       */
++                      for (off = 0;
++                           off < LZO_HEADER + data[thr].cmp_len;
++                           off += PAGE_SIZE) {
++                              memcpy(page, data[thr].cmp + off, PAGE_SIZE);
++
++                              ret = swap_write_page(handle, page, &bio);
++                              if (ret)
++                                      goto out_finish;
++                      }
+               }
++
++              wait_event(crc->done, atomic_read(&crc->stop));
++              atomic_set(&crc->stop, 0);
+       }
+ out_finish:
+@@ -536,16 +737,25 @@ out_finish:
+       do_gettimeofday(&stop);
+       if (!ret)
+               ret = err2;
+-      if (!ret)
++      if (!ret) {
+               printk(KERN_CONT "\b\b\b\bdone\n");
+-      else
++      } else {
+               printk(KERN_CONT "\n");
++      }
+       swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
+-
+-      vfree(cmp);
+-      vfree(unc);
+-      vfree(wrk);
+-      free_page((unsigned long)page);
++out_clean:
++      if (crc) {
++              if (crc->thr)
++                      kthread_stop(crc->thr);
++              kfree(crc);
++      }
++      if (data) {
++              for (thr = 0; thr < nr_threads; thr++)
++                      if (data[thr].thr)
++                              kthread_stop(data[thr].thr);
++              vfree(data);
++      }
++      if (page) free_page((unsigned long)page);
+       return ret;
+ }
+@@ -625,8 +835,15 @@ out_finish:
+ static void release_swap_reader(struct swap_map_handle *handle)
+ {
+-      if (handle->cur)
+-              free_page((unsigned long)handle->cur);
++      struct swap_map_page_list *tmp;
++
++      while (handle->maps) {
++              if (handle->maps->map)
++                      free_page((unsigned long)handle->maps->map);
++              tmp = handle->maps;
++              handle->maps = handle->maps->next;
++              kfree(tmp);
++      }
+       handle->cur = NULL;
+ }
+@@ -634,22 +851,46 @@ static int get_swap_reader(struct swap_map_handle *handle,
+               unsigned int *flags_p)
+ {
+       int error;
++      struct swap_map_page_list *tmp, *last;
++      sector_t offset;
+       *flags_p = swsusp_header->flags;
+       if (!swsusp_header->image) /* how can this happen? */
+               return -EINVAL;
+-      handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
+-      if (!handle->cur)
+-              return -ENOMEM;
++      handle->cur = NULL;
++      last = handle->maps = NULL;
++      offset = swsusp_header->image;
++      while (offset) {
++              tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
++              if (!tmp) {
++                      release_swap_reader(handle);
++                      return -ENOMEM;
++              }
++              memset(tmp, 0, sizeof(*tmp));
++              if (!handle->maps)
++                      handle->maps = tmp;
++              if (last)
++                      last->next = tmp;
++              last = tmp;
++
++              tmp->map = (struct swap_map_page *)
++                         __get_free_page(__GFP_WAIT | __GFP_HIGH);
++              if (!tmp->map) {
++                      release_swap_reader(handle);
++                      return -ENOMEM;
++              }
+-      error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
+-      if (error) {
+-              release_swap_reader(handle);
+-              return error;
++              error = hib_bio_read_page(offset, tmp->map, NULL);
++              if (error) {
++                      release_swap_reader(handle);
++                      return error;
++              }
++              offset = tmp->map->next_swap;
+       }
+       handle->k = 0;
++      handle->cur = handle->maps->map;
+       return 0;
+ }
+@@ -658,6 +899,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
+ {
+       sector_t offset;
+       int error;
++      struct swap_map_page_list *tmp;
+       if (!handle->cur)
+               return -EINVAL;
+@@ -668,13 +910,15 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
+       if (error)
+               return error;
+       if (++handle->k >= MAP_PAGE_ENTRIES) {
+-              error = hib_wait_on_bio_chain(bio_chain);
+               handle->k = 0;
+-              offset = handle->cur->next_swap;
+-              if (!offset)
++              free_page((unsigned long)handle->maps->map);
++              tmp = handle->maps;
++              handle->maps = handle->maps->next;
++              kfree(tmp);
++              if (!handle->maps)
+                       release_swap_reader(handle);
+-              else if (!error)
+-                      error = hib_bio_read_page(offset, handle->cur, NULL);
++              else
++                      handle->cur = handle->maps->map;
+       }
+       return error;
+ }
+@@ -697,7 +941,7 @@ static int load_image(struct swap_map_handle *handle,
+                       unsigned int nr_to_read)
+ {
+       unsigned int m;
+-      int error = 0;
++      int ret = 0;
+       struct timeval start;
+       struct timeval stop;
+       struct bio *bio;
+@@ -713,15 +957,15 @@ static int load_image(struct swap_map_handle *handle,
+       bio = NULL;
+       do_gettimeofday(&start);
+       for ( ; ; ) {
+-              error = snapshot_write_next(snapshot);
+-              if (error <= 0)
++              ret = snapshot_write_next(snapshot);
++              if (ret <= 0)
+                       break;
+-              error = swap_read_page(handle, data_of(*snapshot), &bio);
+-              if (error)
++              ret = swap_read_page(handle, data_of(*snapshot), &bio);
++              if (ret)
+                       break;
+               if (snapshot->sync_read)
+-                      error = hib_wait_on_bio_chain(&bio);
+-              if (error)
++                      ret = hib_wait_on_bio_chain(&bio);
++              if (ret)
+                       break;
+               if (!(nr_pages % m))
+                       printk("\b\b\b\b%3d%%", nr_pages / m);
+@@ -729,17 +973,61 @@ static int load_image(struct swap_map_handle *handle,
+       }
+       err2 = hib_wait_on_bio_chain(&bio);
+       do_gettimeofday(&stop);
+-      if (!error)
+-              error = err2;
+-      if (!error) {
++      if (!ret)
++              ret = err2;
++      if (!ret) {
+               printk("\b\b\b\bdone\n");
+               snapshot_write_finalize(snapshot);
+               if (!snapshot_image_loaded(snapshot))
+-                      error = -ENODATA;
++                      ret = -ENODATA;
+       } else
+               printk("\n");
+       swsusp_show_speed(&start, &stop, nr_to_read, "Read");
+-      return error;
++      return ret;
++}
++
++/**
++ * Structure used for LZO data decompression.
++ */
++struct dec_data {
++      struct task_struct *thr;                  /* thread */
++      atomic_t ready;                           /* ready to start flag */
++      atomic_t stop;                            /* ready to stop flag */
++      int ret;                                  /* return code */
++      wait_queue_head_t go;                     /* start decompression */
++      wait_queue_head_t done;                   /* decompression done */
++      size_t unc_len;                           /* uncompressed length */
++      size_t cmp_len;                           /* compressed length */
++      unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
++      unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
++};
++
++/**
++ * Deompression function that runs in its own thread.
++ */
++static int lzo_decompress_threadfn(void *data)
++{
++      struct dec_data *d = data;
++
++      while (1) {
++              wait_event(d->go, atomic_read(&d->ready) ||
++                                kthread_should_stop());
++              if (kthread_should_stop()) {
++                      d->thr = NULL;
++                      d->ret = -1;
++                      atomic_set(&d->stop, 1);
++                      wake_up(&d->done);
++                      break;
++              }
++              atomic_set(&d->ready, 0);
++
++              d->unc_len = LZO_UNC_SIZE;
++              d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
++                                             d->unc, &d->unc_len);
++              atomic_set(&d->stop, 1);
++              wake_up(&d->done);
++      }
++      return 0;
+ }
+ /**
+@@ -753,50 +1041,120 @@ static int load_image_lzo(struct swap_map_handle *handle,
+                           unsigned int nr_to_read)
+ {
+       unsigned int m;
+-      int error = 0;
++      int ret = 0;
++      int eof = 0;
+       struct bio *bio;
+       struct timeval start;
+       struct timeval stop;
+       unsigned nr_pages;
+-      size_t i, off, unc_len, cmp_len;
+-      unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];
+-
+-      for (i = 0; i < LZO_CMP_PAGES; i++) {
+-              page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+-              if (!page[i]) {
+-                      printk(KERN_ERR "PM: Failed to allocate LZO page\n");
++      size_t off;
++      unsigned i, thr, run_threads, nr_threads;
++      unsigned ring = 0, pg = 0, ring_size = 0,
++               have = 0, want, need, asked = 0;
++      unsigned long read_pages;
++      unsigned char **page = NULL;
++      struct dec_data *data = NULL;
++      struct crc_data *crc = NULL;
++
++      /*
++       * We'll limit the number of threads for decompression to limit memory
++       * footprint.
++       */
++      nr_threads = num_online_cpus() - 1;
++      nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
++
++      page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
++      if (!page) {
++              printk(KERN_ERR "PM: Failed to allocate LZO page\n");
++              ret = -ENOMEM;
++              goto out_clean;
++      }
+-                      while (i)
+-                              free_page((unsigned long)page[--i]);
++      data = vmalloc(sizeof(*data) * nr_threads);
++      if (!data) {
++              printk(KERN_ERR "PM: Failed to allocate LZO data\n");
++              ret = -ENOMEM;
++              goto out_clean;
++      }
++      for (thr = 0; thr < nr_threads; thr++)
++              memset(&data[thr], 0, offsetof(struct dec_data, go));
+-                      return -ENOMEM;
++      crc = kmalloc(sizeof(*crc), GFP_KERNEL);
++      if (!crc) {
++              printk(KERN_ERR "PM: Failed to allocate crc\n");
++              ret = -ENOMEM;
++              goto out_clean;
++      }
++      memset(crc, 0, offsetof(struct crc_data, go));
++
++      /*
++       * Start the decompression threads.
++       */
++      for (thr = 0; thr < nr_threads; thr++) {
++              init_waitqueue_head(&data[thr].go);
++              init_waitqueue_head(&data[thr].done);
++
++              data[thr].thr = kthread_run(lzo_decompress_threadfn,
++                                          &data[thr],
++                                          "image_decompress/%u", thr);
++              if (IS_ERR(data[thr].thr)) {
++                      data[thr].thr = NULL;
++                      printk(KERN_ERR
++                             "PM: Cannot start decompression threads\n");
++                      ret = -ENOMEM;
++                      goto out_clean;
+               }
+       }
+-      unc = vmalloc(LZO_UNC_SIZE);
+-      if (!unc) {
+-              printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
+-
+-              for (i = 0; i < LZO_CMP_PAGES; i++)
+-                      free_page((unsigned long)page[i]);
+-
+-              return -ENOMEM;
++      /*
++       * Start the CRC32 thread.
++       */
++      init_waitqueue_head(&crc->go);
++      init_waitqueue_head(&crc->done);
++
++      handle->crc32 = 0;
++      crc->crc32 = &handle->crc32;
++      for (thr = 0; thr < nr_threads; thr++) {
++              crc->unc[thr] = data[thr].unc;
++              crc->unc_len[thr] = &data[thr].unc_len;
+       }
+-      cmp = vmalloc(LZO_CMP_SIZE);
+-      if (!cmp) {
+-              printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
++      crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
++      if (IS_ERR(crc->thr)) {
++              crc->thr = NULL;
++              printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
++              ret = -ENOMEM;
++              goto out_clean;
++      }
+-              vfree(unc);
+-              for (i = 0; i < LZO_CMP_PAGES; i++)
+-                      free_page((unsigned long)page[i]);
++      /*
++       * Adjust number of pages for read buffering, in case we are short.
++       */
++      read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
++      read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+-              return -ENOMEM;
++      for (i = 0; i < read_pages; i++) {
++              page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
++                                                __GFP_WAIT | __GFP_HIGH :
++                                                __GFP_WAIT);
++              if (!page[i]) {
++                      if (i < LZO_CMP_PAGES) {
++                              ring_size = i;
++                              printk(KERN_ERR
++                                     "PM: Failed to allocate LZO pages\n");
++                              ret = -ENOMEM;
++                              goto out_clean;
++                      } else {
++                              break;
++                      }
++              }
+       }
++      want = ring_size = i;
+       printk(KERN_INFO
++              "PM: Using %u thread(s) for decompression.\n"
+               "PM: Loading and decompressing image data (%u pages) ...     ",
+-              nr_to_read);
++              nr_threads, nr_to_read);
+       m = nr_to_read / 100;
+       if (!m)
+               m = 1;
+@@ -804,85 +1162,189 @@ static int load_image_lzo(struct swap_map_handle *handle,
+       bio = NULL;
+       do_gettimeofday(&start);
+-      error = snapshot_write_next(snapshot);
+-      if (error <= 0)
++      ret = snapshot_write_next(snapshot);
++      if (ret <= 0)
+               goto out_finish;
+-      for (;;) {
+-              error = swap_read_page(handle, page[0], NULL); /* sync */
+-              if (error)
+-                      break;
+-
+-              cmp_len = *(size_t *)page[0];
+-              if (unlikely(!cmp_len ||
+-                           cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
+-                      printk(KERN_ERR "PM: Invalid LZO compressed length\n");
+-                      error = -1;
+-                      break;
++      for(;;) {
++              for (i = 0; !eof && i < want; i++) {
++                      ret = swap_read_page(handle, page[ring], &bio);
++                      if (ret) {
++                              /*
++                               * On real read error, finish. On end of data,
++                               * set EOF flag and just exit the read loop.
++                               */
++                              if (handle->cur &&
++                                  handle->cur->entries[handle->k]) {
++                                      goto out_finish;
++                              } else {
++                                      eof = 1;
++                                      break;
++                              }
++                      }
++                      if (++ring >= ring_size)
++                              ring = 0;
+               }
++              asked += i;
++              want -= i;
+-              for (off = PAGE_SIZE, i = 1;
+-                   off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
+-                      error = swap_read_page(handle, page[i], &bio);
+-                      if (error)
++              /*
++               * We are out of data, wait for some more.
++               */
++              if (!have) {
++                      if (!asked)
++                              break;
++
++                      ret = hib_wait_on_bio_chain(&bio);
++                      if (ret)
+                               goto out_finish;
++                      have += asked;
++                      asked = 0;
++                      if (eof)
++                              eof = 2;
+               }
+-              error = hib_wait_on_bio_chain(&bio); /* need all data now */
+-              if (error)
+-                      goto out_finish;
+-
+-              for (off = 0, i = 0;
+-                   off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
+-                      memcpy(cmp + off, page[i], PAGE_SIZE);
++              if (crc->run_threads) {
++                      wait_event(crc->done, atomic_read(&crc->stop));
++                      atomic_set(&crc->stop, 0);
++                      crc->run_threads = 0;
+               }
+-              unc_len = LZO_UNC_SIZE;
+-              error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
+-                                            unc, &unc_len);
+-              if (error < 0) {
+-                      printk(KERN_ERR "PM: LZO decompression failed\n");
+-                      break;
++              for (thr = 0; have && thr < nr_threads; thr++) {
++                      data[thr].cmp_len = *(size_t *)page[pg];
++                      if (unlikely(!data[thr].cmp_len ||
++                                   data[thr].cmp_len >
++                                   lzo1x_worst_compress(LZO_UNC_SIZE))) {
++                              printk(KERN_ERR
++                                     "PM: Invalid LZO compressed length\n");
++                              ret = -1;
++                              goto out_finish;
++                      }
++
++                      need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
++                                          PAGE_SIZE);
++                      if (need > have) {
++                              if (eof > 1) {
++                                      ret = -1;
++                                      goto out_finish;
++                              }
++                              break;
++                      }
++
++                      for (off = 0;
++                           off < LZO_HEADER + data[thr].cmp_len;
++                           off += PAGE_SIZE) {
++                              memcpy(data[thr].cmp + off,
++                                     page[pg], PAGE_SIZE);
++                              have--;
++                              want++;
++                              if (++pg >= ring_size)
++                                      pg = 0;
++                      }
++
++                      atomic_set(&data[thr].ready, 1);
++                      wake_up(&data[thr].go);
+               }
+-              if (unlikely(!unc_len ||
+-                           unc_len > LZO_UNC_SIZE ||
+-                           unc_len & (PAGE_SIZE - 1))) {
+-                      printk(KERN_ERR "PM: Invalid LZO uncompressed length\n");
+-                      error = -1;
+-                      break;
++              /*
++               * Wait for more data while we are decompressing.
++               */
++              if (have < LZO_CMP_PAGES && asked) {
++                      ret = hib_wait_on_bio_chain(&bio);
++                      if (ret)
++                              goto out_finish;
++                      have += asked;
++                      asked = 0;
++                      if (eof)
++                              eof = 2;
+               }
+-              for (off = 0; off < unc_len; off += PAGE_SIZE) {
+-                      memcpy(data_of(*snapshot), unc + off, PAGE_SIZE);
++              for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
++                      wait_event(data[thr].done,
++                                 atomic_read(&data[thr].stop));
++                      atomic_set(&data[thr].stop, 0);
++
++                      ret = data[thr].ret;
+-                      if (!(nr_pages % m))
+-                              printk("\b\b\b\b%3d%%", nr_pages / m);
+-                      nr_pages++;
++                      if (ret < 0) {
++                              printk(KERN_ERR
++                                     "PM: LZO decompression failed\n");
++                              goto out_finish;
++                      }
+-                      error = snapshot_write_next(snapshot);
+-                      if (error <= 0)
++                      if (unlikely(!data[thr].unc_len ||
++                                   data[thr].unc_len > LZO_UNC_SIZE ||
++                                   data[thr].unc_len & (PAGE_SIZE - 1))) {
++                              printk(KERN_ERR
++                                     "PM: Invalid LZO uncompressed length\n");
++                              ret = -1;
+                               goto out_finish;
++                      }
++
++                      for (off = 0;
++                           off < data[thr].unc_len; off += PAGE_SIZE) {
++                              memcpy(data_of(*snapshot),
++                                     data[thr].unc + off, PAGE_SIZE);
++
++                              if (!(nr_pages % m))
++                                      printk("\b\b\b\b%3d%%", nr_pages / m);
++                              nr_pages++;
++
++                              ret = snapshot_write_next(snapshot);
++                              if (ret <= 0) {
++                                      crc->run_threads = thr + 1;
++                                      atomic_set(&crc->ready, 1);
++                                      wake_up(&crc->go);
++                                      goto out_finish;
++                              }
++                      }
+               }
++
++              crc->run_threads = thr;
++              atomic_set(&crc->ready, 1);
++              wake_up(&crc->go);
+       }
+ out_finish:
++      if (crc->run_threads) {
++              wait_event(crc->done, atomic_read(&crc->stop));
++              atomic_set(&crc->stop, 0);
++      }
+       do_gettimeofday(&stop);
+-      if (!error) {
++      if (!ret) {
+               printk("\b\b\b\bdone\n");
+               snapshot_write_finalize(snapshot);
+               if (!snapshot_image_loaded(snapshot))
+-                      error = -ENODATA;
++                      ret = -ENODATA;
++              if (!ret) {
++                      if (swsusp_header->flags & SF_CRC32_MODE) {
++                              if(handle->crc32 != swsusp_header->crc32) {
++                                      printk(KERN_ERR
++                                             "PM: Invalid image CRC32!\n");
++                                      ret = -ENODATA;
++                              }
++                      }
++              }
+       } else
+               printk("\n");
+       swsusp_show_speed(&start, &stop, nr_to_read, "Read");
+-
+-      vfree(cmp);
+-      vfree(unc);
+-      for (i = 0; i < LZO_CMP_PAGES; i++)
++out_clean:
++      for (i = 0; i < ring_size; i++)
+               free_page((unsigned long)page[i]);
++      if (crc) {
++              if (crc->thr)
++                      kthread_stop(crc->thr);
++              kfree(crc);
++      }
++      if (data) {
++              for (thr = 0; thr < nr_threads; thr++)
++                      if (data[thr].thr)
++                              kthread_stop(data[thr].thr);
++              vfree(data);
++      }
++      if (page) vfree(page);
+-      return error;
++      return ret;
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0092-PM-Sleep-Mark-devices-involved-in-wakeup-signaling-d.patch b/patches.runtime_pm/0092-PM-Sleep-Mark-devices-involved-in-wakeup-signaling-d.patch
new file mode 100644 (file)
index 0000000..645a80d
--- /dev/null
@@ -0,0 +1,95 @@
+From 435d910aeaf727d5b6c73cc242c9c3a59f7b8917 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 16 Oct 2011 23:34:36 +0200
+Subject: PM / Sleep: Mark devices involved in wakeup signaling during suspend
+
+The generic PM domains code in drivers/base/power/domain.c has
+to avoid powering off domains that provide power to wakeup devices
+during system suspend.  Currently, however, this only works for
+wakeup devices directly belonging to the given domain and not for
+their children (or the children of their children and so on).
+Thus, if there's a wakeup device whose parent belongs to a power
+domain handled by the generic PM domains code, the domain will be
+powered off during system suspend preventing the device from
+signaling wakeup.
+
+To address this problem introduce a device flag, power.wakeup_path,
+that will be set during system suspend for all wakeup devices,
+their parents, the parents of their parents and so on.  This way,
+all wakeup paths in the device hierarchy will be marked and the
+generic PM domains code will only need to avoid powering off
+domains containing devices whose power.wakeup_path is set.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 4ca46ff3e0d8c234cb40ebb6457653b59584426c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    4 ++--
+ drivers/base/power/main.c   |    8 +++++++-
+ include/linux/pm.h          |    1 +
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 22fe029..6790cf7 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -714,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+       if (ret)
+               return ret;
+-      if (device_may_wakeup(dev)
++      if (dev->power.wakeup_path
+           && genpd->active_wakeup && genpd->active_wakeup(dev))
+               return 0;
+@@ -938,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+       if (ret)
+               return ret;
+-      if (device_may_wakeup(dev)
++      if (dev->power.wakeup_path
+           && genpd->active_wakeup && genpd->active_wakeup(dev))
+               return 0;
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index b1b5826..59f8ab2 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -917,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+       }
+  End:
+-      dev->power.is_suspended = !error;
++      if (!error) {
++              dev->power.is_suspended = true;
++              if (dev->power.wakeup_path && dev->parent)
++                      dev->parent->power.wakeup_path = true;
++      }
+       device_unlock(dev);
+       complete_all(&dev->power.completion);
+@@ -1020,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
+       device_lock(dev);
++      dev->power.wakeup_path = device_may_wakeup(dev);
++
+       if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "preparing power domain ");
+               if (dev->pm_domain->ops.prepare)
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 91f248b..f15acb6 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -452,6 +452,7 @@ struct dev_pm_info {
+       struct list_head        entry;
+       struct completion       completion;
+       struct wakeup_source    *wakeup;
++      bool                    wakeup_path:1;
+ #else
+       unsigned int            should_wakeup:1;
+ #endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0093-PM-Documentation-Update-docs-about-suspend-and-CPU-h.patch b/patches.runtime_pm/0093-PM-Documentation-Update-docs-about-suspend-and-CPU-h.patch
new file mode 100644 (file)
index 0000000..87005be
--- /dev/null
@@ -0,0 +1,321 @@
+From a371c0a97e239d4243cbfd2c463f2f211d591538 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Wed, 19 Oct 2011 23:59:05 +0200
+Subject: PM / Documentation: Update docs about suspend and CPU hotplug
+
+Update the documentation about the interaction between the suspend (S3) call
+path and the CPU hotplug infrastructure.
+This patch focusses only on the activities of the freezer, cpu hotplug and
+the notifications involved. It outlines how regular CPU hotplug differs from
+the way it is invoked during suspend and also tries to explain the locking
+involved. In addition to that, it discusses the issue of microcode update
+during CPU hotplug operations.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 7fef9fc83fbd7293ea9fe665d14046422ebf4219)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/00-INDEX                   |    2 +
+ Documentation/power/suspend-and-cpuhotplug.txt |  275 ++++++++++++++++++++++++
+ 2 files changed, 277 insertions(+)
+ create mode 100644 Documentation/power/suspend-and-cpuhotplug.txt
+
+diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
+index 45e9d4a..a4d682f 100644
+--- a/Documentation/power/00-INDEX
++++ b/Documentation/power/00-INDEX
+@@ -26,6 +26,8 @@ s2ram.txt
+       - How to get suspend to ram working (and debug it when it isn't)
+ states.txt
+       - System power management states
++suspend-and-cpuhotplug.txt
++      - Explains the interaction between Suspend-to-RAM (S3) and CPU hotplug
+ swsusp-and-swap-files.txt
+       - Using swap files with software suspend (to disk)
+ swsusp-dmcrypt.txt
+diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
+new file mode 100644
+index 0000000..f28f9a6
+--- /dev/null
++++ b/Documentation/power/suspend-and-cpuhotplug.txt
+@@ -0,0 +1,275 @@
++Interaction of Suspend code (S3) with the CPU hotplug infrastructure
++
++     (C) 2011 Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
++
++
++I. How does the regular CPU hotplug code differ from how the Suspend-to-RAM
++   infrastructure uses it internally? And where do they share common code?
++
++Well, a picture is worth a thousand words... So ASCII art follows :-)
++
++[This depicts the current design in the kernel, and focusses only on the
++interactions involving the freezer and CPU hotplug and also tries to explain
++the locking involved. It outlines the notifications involved as well.
++But please note that here, only the call paths are illustrated, with the aim
++of describing where they take different paths and where they share code.
++What happens when regular CPU hotplug and Suspend-to-RAM race with each other
++is not depicted here.]
++
++On a high level, the suspend-resume cycle goes like this:
++
++|Freeze| -> |Disable nonboot| -> |Do suspend| -> |Enable nonboot| -> |Thaw |
++|tasks |    |     cpus      |    |          |    |     cpus     |    |tasks|
++
++
++More details follow:
++
++                                Suspend call path
++                                -----------------
++
++                                  Write 'mem' to
++                                /sys/power/state
++                                    syfs file
++                                        |
++                                        v
++                               Acquire pm_mutex lock
++                                        |
++                                        v
++                             Send PM_SUSPEND_PREPARE
++                                   notifications
++                                        |
++                                        v
++                                   Freeze tasks
++                                        |
++                                        |
++                                        v
++                              disable_nonboot_cpus()
++                                   /* start */
++                                        |
++                                        v
++                            Acquire cpu_add_remove_lock
++                                        |
++                                        v
++                             Iterate over CURRENTLY
++                                   online CPUs
++                                        |
++                                        |
++                                        |                ----------
++                                        v                          | L
++             ======>               _cpu_down()                     |
++            |              [This takes cpuhotplug.lock             |
++  Common    |               before taking down the CPU             |
++   code     |               and releases it when done]             | O
++            |            While it is at it, notifications          |
++            |            are sent when notable events occur,       |
++             ======>     by running all registered callbacks.      |
++                                        |                          | O
++                                        |                          |
++                                        |                          |
++                                        v                          |
++                            Note down these cpus in                | P
++                                frozen_cpus mask         ----------
++                                        |
++                                        v
++                           Disable regular cpu hotplug
++                        by setting cpu_hotplug_disabled=1
++                                        |
++                                        v
++                            Release cpu_add_remove_lock
++                                        |
++                                        v
++                       /* disable_nonboot_cpus() complete */
++                                        |
++                                        v
++                                   Do suspend
++
++
++
++Resuming back is likewise, with the counterparts being (in the order of
++execution during resume):
++* enable_nonboot_cpus() which involves:
++   |  Acquire cpu_add_remove_lock
++   |  Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug
++   |  Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
++   |  Release cpu_add_remove_lock
++   v
++
++* thaw tasks
++* send PM_POST_SUSPEND notifications
++* Release pm_mutex lock.
++
++
++It is to be noted here that the pm_mutex lock is acquired at the very
++beginning, when we are just starting out to suspend, and then released only
++after the entire cycle is complete (i.e., suspend + resume).
++
++
++
++                          Regular CPU hotplug call path
++                          -----------------------------
++
++                                Write 0 (or 1) to
++                       /sys/devices/system/cpu/cpu*/online
++                                    sysfs file
++                                        |
++                                        |
++                                        v
++                                    cpu_down()
++                                        |
++                                        v
++                           Acquire cpu_add_remove_lock
++                                        |
++                                        v
++                          If cpu_hotplug_disabled is 1
++                                return gracefully
++                                        |
++                                        |
++                                        v
++             ======>                _cpu_down()
++            |              [This takes cpuhotplug.lock
++  Common    |               before taking down the CPU
++   code     |               and releases it when done]
++            |            While it is at it, notifications
++            |           are sent when notable events occur,
++             ======>    by running all registered callbacks.
++                                        |
++                                        |
++                                        v
++                          Release cpu_add_remove_lock
++                               [That's it!, for
++                              regular CPU hotplug]
++
++
++
++So, as can be seen from the two diagrams (the parts marked as "Common code"),
++regular CPU hotplug and the suspend code path converge at the _cpu_down() and
++_cpu_up() functions. They differ in the arguments passed to these functions,
++in that during regular CPU hotplug, 0 is passed for the 'tasks_frozen'
++argument. But during suspend, since the tasks are already frozen by the time
++the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
++with the 'tasks_frozen' argument set to 1.
++[See below for some known issues regarding this.]
++
++
++Important files and functions/entry points:
++------------------------------------------
++
++kernel/power/process.c : freeze_processes(), thaw_processes()
++kernel/power/suspend.c : suspend_prepare(), suspend_enter(), suspend_finish()
++kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](), [disable|enable]_nonboot_cpus()
++
++
++
++II. What are the issues involved in CPU hotplug?
++    -------------------------------------------
++
++There are some interesting situations involving CPU hotplug and microcode
++update on the CPUs, as discussed below:
++
++[Please bear in mind that the kernel requests the microcode images from
++userspace, using the request_firmware() function defined in
++drivers/base/firmware_class.c]
++
++
++a. When all the CPUs are identical:
++
++   This is the most common situation and it is quite straightforward: we want
++   to apply the same microcode revision to each of the CPUs.
++   To give an example of x86, the collect_cpu_info() function defined in
++   arch/x86/kernel/microcode_core.c helps in discovering the type of the CPU
++   and thereby in applying the correct microcode revision to it.
++   But note that the kernel does not maintain a common microcode image for the
++   all CPUs, in order to handle case 'b' described below.
++
++
++b. When some of the CPUs are different than the rest:
++
++   In this case since we probably need to apply different microcode revisions
++   to different CPUs, the kernel maintains a copy of the correct microcode
++   image for each CPU (after appropriate CPU type/model discovery using
++   functions such as collect_cpu_info()).
++
++
++c. When a CPU is physically hot-unplugged and a new (and possibly different
++   type of) CPU is hot-plugged into the system:
++
++   In the current design of the kernel, whenever a CPU is taken offline during
++   a regular CPU hotplug operation, upon receiving the CPU_DEAD notification
++   (which is sent by the CPU hotplug code), the microcode update driver's
++   callback for that event reacts by freeing the kernel's copy of the
++   microcode image for that CPU.
++
++   Hence, when a new CPU is brought online, since the kernel finds that it
++   doesn't have the microcode image, it does the CPU type/model discovery
++   afresh and then requests the userspace for the appropriate microcode image
++   for that CPU, which is subsequently applied.
++
++   For example, in x86, the mc_cpu_callback() function (which is the microcode
++   update driver's callback registered for CPU hotplug events) calls
++   microcode_update_cpu() which would call microcode_init_cpu() in this case,
++   instead of microcode_resume_cpu() when it finds that the kernel doesn't
++   have a valid microcode image. This ensures that the CPU type/model
++   discovery is performed and the right microcode is applied to the CPU after
++   getting it from userspace.
++
++
++d. Handling microcode update during suspend/hibernate:
++
++   Strictly speaking, during a CPU hotplug operation which does not involve
++   physically removing or inserting CPUs, the CPUs are not actually powered
++   off during a CPU offline. They are just put to the lowest C-states possible.
++   Hence, in such a case, it is not really necessary to re-apply microcode
++   when the CPUs are brought back online, since they wouldn't have lost the
++   image during the CPU offline operation.
++
++   This is the usual scenario encountered during a resume after a suspend.
++   However, in the case of hibernation, since all the CPUs are completely
++   powered off, during restore it becomes necessary to apply the microcode
++   images to all the CPUs.
++
++   [Note that we don't expect someone to physically pull out nodes and insert
++   nodes with a different type of CPUs in-between a suspend-resume or a
++   hibernate/restore cycle.]
++
++   In the current design of the kernel however, during a CPU offline operation
++   as part of the suspend/hibernate cycle (the CPU_DEAD_FROZEN notification),
++   the existing copy of microcode image in the kernel is not freed up.
++   And during the CPU online operations (during resume/restore), since the
++   kernel finds that it already has copies of the microcode images for all the
++   CPUs, it just applies them to the CPUs, avoiding any re-discovery of CPU
++   type/model and the need for validating whether the microcode revisions are
++   right for the CPUs or not (due to the above assumption that physical CPU
++   hotplug will not be done in-between suspend/resume or hibernate/restore
++   cycles).
++
++
++III. Are there any known problems when regular CPU hotplug and suspend race
++     with each other?
++
++Yes, they are listed below:
++
++1. When invoking regular CPU hotplug, the 'tasks_frozen' argument passed to
++   the _cpu_down() and _cpu_up() functions is *always* 0.
++   This might not reflect the true current state of the system, since the
++   tasks could have been frozen by an out-of-band event such as a suspend
++   operation in progress. Hence, it will lead to wrong notifications being
++   sent during the cpu online/offline events (eg, CPU_ONLINE notification
++   instead of CPU_ONLINE_FROZEN) which in turn will lead to execution of
++   inappropriate code by the callbacks registered for such CPU hotplug events.
++
++2. If a regular CPU hotplug stress test happens to race with the freezer due
++   to a suspend operation in progress at the same time, then we could hit the
++   situation described below:
++
++    * A regular cpu online operation continues its journey from userspace
++      into the kernel, since the freezing has not yet begun.
++    * Then freezer gets to work and freezes userspace.
++    * If cpu online has not yet completed the microcode update stuff by now,
++      it will now start waiting on the frozen userspace in the
++      TASK_UNINTERRUPTIBLE state, in order to get the microcode image.
++    * Now the freezer continues and tries to freeze the remaining tasks. But
++      due to this wait mentioned above, the freezer won't be able to freeze
++      the cpu online hotplug task and hence freezing of tasks fails.
++
++   As a result of this task freezing failure, the suspend operation gets
++   aborted.
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0094-PM-Clocks-Remove-redundant-NULL-checks-before-kfree.patch b/patches.runtime_pm/0094-PM-Clocks-Remove-redundant-NULL-checks-before-kfree.patch
new file mode 100644 (file)
index 0000000..ad1735b
--- /dev/null
@@ -0,0 +1,37 @@
+From 7f71d2623cd1d37e5ffd6a2c55fb5d4d2a990eb3 Mon Sep 17 00:00:00 2001
+From: Jonghwan Choi <jhbird.choi@samsung.com>
+Date: Sat, 22 Oct 2011 00:22:54 +0200
+Subject: PM / Clocks: Remove redundant NULL checks before kfree()
+
+Since kfree() checks it its argument is not NULL, it is not necessary
+to duplicate this check in __pm_clk_remove().
+
+[rjw: Added the changelog.]
+
+Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 0ab1e79b825a5cd8aeb3b34d89c9a89dea900056)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/clock_ops.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index b876e60..5f0f85d 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -104,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
+                       clk_put(ce->clk);
+       }
+-      if (ce->con_id)
+-              kfree(ce->con_id);
+-
++      kfree(ce->con_id);
+       kfree(ce);
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0095-kernel-fix-several-implicit-usasges-of-kmod.h.patch b/patches.runtime_pm/0095-kernel-fix-several-implicit-usasges-of-kmod.h.patch
new file mode 100644 (file)
index 0000000..62b4f76
--- /dev/null
@@ -0,0 +1,70 @@
+From 8d843989de8dc924a683d14534fb2a44cc4eb3bf Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Thu, 26 May 2011 12:48:41 -0400
+Subject: kernel: fix several implicit usasges of kmod.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+These files were implicitly relying on <linux/kmod.h> coming in via
+module.h, as without it we get things like:
+
+kernel/power/suspend.c:100: error: implicit declaration of function â€˜usermodehelper_disable’
+kernel/power/suspend.c:109: error: implicit declaration of function â€˜usermodehelper_enable’
+kernel/power/user.c:254: error: implicit declaration of function â€˜usermodehelper_disable’
+kernel/power/user.c:261: error: implicit declaration of function â€˜usermodehelper_enable’
+
+kernel/sys.c:317: error: implicit declaration of function â€˜usermodehelper_disable’
+kernel/sys.c:1816: error: implicit declaration of function â€˜call_usermodehelper_setup’
+kernel/sys.c:1822: error: implicit declaration of function â€˜call_usermodehelper_setfns’
+kernel/sys.c:1824: error: implicit declaration of function â€˜call_usermodehelper_exec’
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+(cherry picked from commit 74da1ff71350f3638c51613085f89c0865d7fe08)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/suspend.c |    1 +
+ kernel/power/user.c    |    1 +
+ kernel/sys.c           |    1 +
+ 3 files changed, 3 insertions(+)
+
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index fdd4263..31aae32 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -12,6 +12,7 @@
+ #include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
++#include <linux/kmod.h>
+ #include <linux/console.h>
+ #include <linux/cpu.h>
+ #include <linux/syscalls.h>
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 42ddbc6..6d8f535 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -12,6 +12,7 @@
+ #include <linux/suspend.h>
+ #include <linux/syscalls.h>
+ #include <linux/reboot.h>
++#include <linux/kmod.h>
+ #include <linux/string.h>
+ #include <linux/device.h>
+ #include <linux/miscdevice.h>
+diff --git a/kernel/sys.c b/kernel/sys.c
+index f88dadc..f861492 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -13,6 +13,7 @@
+ #include <linux/prctl.h>
+ #include <linux/highuid.h>
+ #include <linux/fs.h>
++#include <linux/kmod.h>
+ #include <linux/perf_event.h>
+ #include <linux/resource.h>
+ #include <linux/kernel.h>
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0096-kernel-Fix-files-explicitly-needing-EXPORT_SYMBOL-in.patch b/patches.runtime_pm/0096-kernel-Fix-files-explicitly-needing-EXPORT_SYMBOL-in.patch
new file mode 100644 (file)
index 0000000..cb91694
--- /dev/null
@@ -0,0 +1,151 @@
+From 71a73f729b3d470a8680d70b87cb0a504ab27b64 Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Thu, 26 May 2011 16:00:52 -0400
+Subject: kernel: Fix files explicitly needing EXPORT_SYMBOL infrastructure
+
+These files were getting <linux/module.h> via an implicit non-obvious
+path, but we want to crush those out of existence since they cost
+time during compiles of processing thousands of lines of headers
+for no reason.  Give them the lightweight header that just contains
+the EXPORT_SYMBOL infrastructure.
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+(cherry picked from commit 6e5fdeedca610df600aabc393c4b1f44b128fe49)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/compat.c                 |    1 +
+ kernel/debug/kdb/kdb_debugger.c |    1 +
+ kernel/events/core.c            |    1 +
+ kernel/irq/generic-chip.c       |    1 +
+ kernel/power/hibernate.c        |    1 +
+ kernel/power/main.c             |    1 +
+ kernel/power/qos.c              |    1 +
+ kernel/power/suspend.c          |    1 +
+ kernel/time/posix-clock.c       |    1 +
+ kernel/trace/blktrace.c         |    1 +
+ 10 files changed, 10 insertions(+)
+
+diff --git a/kernel/compat.c b/kernel/compat.c
+index fc9eb093..caf13a2 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -21,6 +21,7 @@
+ #include <linux/unistd.h>
+ #include <linux/security.h>
+ #include <linux/timex.h>
++#include <linux/export.h>
+ #include <linux/migrate.h>
+ #include <linux/posix-timers.h>
+ #include <linux/times.h>
+diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
+index dd0b1b7..8427a79 100644
+--- a/kernel/debug/kdb/kdb_debugger.c
++++ b/kernel/debug/kdb/kdb_debugger.c
+@@ -11,6 +11,7 @@
+ #include <linux/kgdb.h>
+ #include <linux/kdb.h>
+ #include <linux/kdebug.h>
++#include <linux/export.h>
+ #include "kdb_private.h"
+ #include "../debug_core.h"
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 32a6151..1ebf968 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -25,6 +25,7 @@
+ #include <linux/reboot.h>
+ #include <linux/vmstat.h>
+ #include <linux/device.h>
++#include <linux/export.h>
+ #include <linux/vmalloc.h>
+ #include <linux/hardirq.h>
+ #include <linux/rculist.h>
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index e38544d..c84b470 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -6,6 +6,7 @@
+ #include <linux/io.h>
+ #include <linux/irq.h>
+ #include <linux/slab.h>
++#include <linux/export.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/syscore_ops.h>
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index bb170c2..3987d43 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -9,6 +9,7 @@
+  * This file is released under the GPLv2.
+  */
++#include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscalls.h>
+ #include <linux/reboot.h>
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index a52e884..71f49fe 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -8,6 +8,7 @@
+  *
+  */
++#include <linux/export.h>
+ #include <linux/kobject.h>
+ #include <linux/string.h>
+ #include <linux/resume-trace.h>
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 1c1797d..2c0a65e 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -43,6 +43,7 @@
+ #include <linux/kernel.h>
+ #include <linux/uaccess.h>
++#include <linux/export.h>
+ /*
+  * locking rule: all changes to constraints or notifiers lists
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 31aae32..4953dc0 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -22,6 +22,7 @@
+ #include <linux/list.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
++#include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
+ #include <trace/events/power.h>
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index c340ca6..ce033c7 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -18,6 +18,7 @@
+  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+  */
+ #include <linux/device.h>
++#include <linux/export.h>
+ #include <linux/file.h>
+ #include <linux/posix-clock.h>
+ #include <linux/slab.h>
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 6957aa2..c1ff082 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -23,6 +23,7 @@
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
+ #include <linux/debugfs.h>
++#include <linux/export.h>
+ #include <linux/time.h>
+ #include <linux/uaccess.h>
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0097-drivers-base-Add-export.h-for-EXPORT_SYMBOL-THIS_MOD.patch b/patches.runtime_pm/0097-drivers-base-Add-export.h-for-EXPORT_SYMBOL-THIS_MOD.patch
new file mode 100644 (file)
index 0000000..9187fed
--- /dev/null
@@ -0,0 +1,143 @@
+From 96dd806a2e016cb1e861cfc1bbee8477584726a7 Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Fri, 27 May 2011 07:12:15 -0400
+Subject: drivers/base: Add export.h for EXPORT_SYMBOL/THIS_MODULE as
+ required.
+
+Most of these files were implicitly getting EXPORT_SYMBOL via
+device.h which was including module.h, but that path will be broken
+soon.
+
+[ with input from Stephen Rothwell <sfr@canb.auug.org.au> ]
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+(cherry picked from commit 1b6bc32f0a7380102499deb6aa99a59e789efb33)
+
+Conflicts:
+
+       drivers/base/regmap/regcache.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/dma-mapping.c       |    1 +
+ drivers/base/hypervisor.c        |    1 +
+ drivers/base/power/generic_ops.c |    1 +
+ drivers/base/power/main.c        |    1 +
+ drivers/base/power/qos.c         |    1 +
+ drivers/base/power/runtime.c     |    1 +
+ drivers/base/power/sysfs.c       |    1 +
+ drivers/base/power/trace.c       |    1 +
+ drivers/base/power/wakeup.c      |    1 +
+ 9 files changed, 9 insertions(+)
+
+diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
+index 763d59c..6f3676f 100644
+--- a/drivers/base/dma-mapping.c
++++ b/drivers/base/dma-mapping.c
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/dma-mapping.h>
++#include <linux/export.h>
+ #include <linux/gfp.h>
+ /*
+diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
+index 6428cba..4f8b741 100644
+--- a/drivers/base/hypervisor.c
++++ b/drivers/base/hypervisor.c
+@@ -10,6 +10,7 @@
+ #include <linux/kobject.h>
+ #include <linux/device.h>
++#include <linux/export.h>
+ #include "base.h"
+ struct kobject *hypervisor_kobj;
+diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
+index 9508df7..265a0ee 100644
+--- a/drivers/base/power/generic_ops.c
++++ b/drivers/base/power/generic_ops.c
+@@ -8,6 +8,7 @@
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
++#include <linux/export.h>
+ #ifdef CONFIG_PM_RUNTIME
+ /**
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 59f8ab2..7fa0984 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -19,6 +19,7 @@
+ #include <linux/device.h>
+ #include <linux/kallsyms.h>
++#include <linux/export.h>
+ #include <linux/mutex.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index 91e0614..30a94ea 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -39,6 +39,7 @@
+ #include <linux/slab.h>
+ #include <linux/device.h>
+ #include <linux/mutex.h>
++#include <linux/export.h>
+ static DEFINE_MUTEX(dev_pm_qos_mtx);
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 6bb3aaf..1079e03 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/sched.h>
++#include <linux/export.h>
+ #include <linux/pm_runtime.h>
+ #include <trace/events/rpm.h>
+ #include "power.h"
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index 942d6a7..ac63d48 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -4,6 +4,7 @@
+ #include <linux/device.h>
+ #include <linux/string.h>
++#include <linux/export.h>
+ #include <linux/pm_runtime.h>
+ #include <asm/atomic.h>
+ #include <linux/jiffies.h>
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index af10abe..d94a1f5 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/resume-trace.h>
++#include <linux/export.h>
+ #include <linux/rtc.h>
+ #include <asm/rtc.h>
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 14ee07e..caf995f 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/capability.h>
++#include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/seq_file.h>
+ #include <linux/debugfs.h>
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0098-drivers-base-change-module.h-export.h-in-power-commo.patch b/patches.runtime_pm/0098-drivers-base-change-module.h-export.h-in-power-commo.patch
new file mode 100644 (file)
index 0000000..48e9ec7
--- /dev/null
@@ -0,0 +1,33 @@
+From c904f21e7e8971f1326781657a9659e8c4cfbe0b Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Wed, 28 Sep 2011 18:23:03 -0400
+Subject: drivers/base: change module.h -> export.h in power/common.c
+
+This file isn't using full modular functionality, and hence
+can be "downgraded" to just using export.h
+
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+(cherry picked from commit aaf195444be47aa3d3776825b3b384a61f40dca4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/common.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
+index 29820c3..4af7c1c 100644
+--- a/drivers/base/power/common.c
++++ b/drivers/base/power/common.c
+@@ -8,7 +8,7 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/module.h>
++#include <linux/export.h>
+ #include <linux/slab.h>
+ #include <linux/pm_clock.h>
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0099-pm_runtime.h-explicitly-requires-notifier.h.patch b/patches.runtime_pm/0099-pm_runtime.h-explicitly-requires-notifier.h.patch
new file mode 100644 (file)
index 0000000..345e1b5
--- /dev/null
@@ -0,0 +1,31 @@
+From 9c210763508ed5490bbdc7aa7d2d2a205a60658d Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Fri, 27 May 2011 07:08:41 -0400
+Subject: pm_runtime.h: explicitly requires notifier.h
+
+This file was getting notifier.h via device.h --> module.h but
+the module.h inclusion is going away, so add notifier.h directly.
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+(cherry picked from commit 246359d37985000b8403487e46867c4eb610af72)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_runtime.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 70b2840..d8d9036 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -10,6 +10,7 @@
+ #define _LINUX_PM_RUNTIME_H
+ #include <linux/device.h>
++#include <linux/notifier.h>
+ #include <linux/pm.h>
+ #include <linux/jiffies.h>
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0100-PM-Sleep-Update-freezer-documentation.patch b/patches.runtime_pm/0100-PM-Sleep-Update-freezer-documentation.patch
new file mode 100644 (file)
index 0000000..db129a1
--- /dev/null
@@ -0,0 +1,53 @@
+From 7512e660364856ac3fa294de3e062b64f78fcfc8 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 3 Nov 2011 00:59:52 +0100
+Subject: PM / Sleep: Update freezer documentation
+
+This patch:
+ * Substitutes some obsolete references to kernel/power/process.c by
+   kernel/freezer.c.
+ * Mentions kernel/freezer.c as being part of the "freezer" code along
+   with the rest of the files.
+ * Fixes a trivial typo.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e9db50b839c592fcd22952d7f1dccbd0a56da57d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/freezing-of-tasks.txt |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
+index 38b5724..316c2ba 100644
+--- a/Documentation/power/freezing-of-tasks.txt
++++ b/Documentation/power/freezing-of-tasks.txt
+@@ -22,12 +22,12 @@ try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
+ either wakes them up, if they are kernel threads, or sends fake signals to them,
+ if they are user space processes.  A task that has TIF_FREEZE set, should react
+ to it by calling the function called refrigerator() (defined in
+-kernel/power/process.c), which sets the task's PF_FROZEN flag, changes its state
++kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
+ to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
+ Then, we say that the task is 'frozen' and therefore the set of functions
+ handling this mechanism is referred to as 'the freezer' (these functions are
+-defined in kernel/power/process.c and include/linux/freezer.h).  User space
+-processes are generally frozen before kernel threads.
++defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
++User space processes are generally frozen before kernel threads.
+ It is not recommended to call refrigerator() directly.  Instead, it is
+ recommended to use the try_to_freeze() function (defined in
+@@ -95,7 +95,7 @@ after the memory for the image has been freed, we don't want tasks to allocate
+ additional memory and we prevent them from doing that by freezing them earlier.
+ [Of course, this also means that device drivers should not allocate substantial
+ amounts of memory from their .suspend() callbacks before hibernation, but this
+-is e separate issue.]
++is a separate issue.]
+ 3. The third reason is to prevent user space processes and some kernel threads
+ from interfering with the suspending and resuming of devices.  A user space
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0101-PM-Runtime-Fix-runtime-accounting-calculation-error.patch b/patches.runtime_pm/0101-PM-Runtime-Fix-runtime-accounting-calculation-error.patch
new file mode 100644 (file)
index 0000000..01582ac
--- /dev/null
@@ -0,0 +1,40 @@
+From ab1561ee359a4a722934b737767c1c6bbc8facff Mon Sep 17 00:00:00 2001
+From: venu byravarasu <vbyravarasu@nvidia.com>
+Date: Thu, 3 Nov 2011 10:12:14 +0100
+Subject: PM / Runtime: Fix runtime accounting calculation error
+
+With delta type being int, its value is made zero
+for all values of now > 0x80000000.
+Hence fixing it.
+
+Signed-off-by: venu byravarasu <vbyravarasu@nvidia.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit def0c0a37d02820497fcd5a74b6cc93dbce5dc06)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 1079e03..e8a5172 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -30,13 +30,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
+ void update_pm_runtime_accounting(struct device *dev)
+ {
+       unsigned long now = jiffies;
+-      int delta;
++      unsigned long delta;
+       delta = now - dev->power.accounting_timestamp;
+-      if (delta < 0)
+-              delta = 0;
+-
+       dev->power.accounting_timestamp = now;
+       if (dev->power.disable_depth > 0)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0102-PM-QoS-Remove-redundant-check.patch b/patches.runtime_pm/0102-PM-QoS-Remove-redundant-check.patch
new file mode 100644 (file)
index 0000000..494e836
--- /dev/null
@@ -0,0 +1,33 @@
+From 96abfc67c5384082c9f292be29c7ad088f0c63eb Mon Sep 17 00:00:00 2001
+From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Date: Thu, 3 Nov 2011 10:12:36 +0100
+Subject: PM / QoS: Remove redundant check
+
+Remove an "if" check, that repeats an equivalent one 6 lines above.
+
+Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 6513fd6972f725291ee8ce62c7a39fb8a6c7391e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/qos.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 2c0a65e..56db751 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -387,8 +387,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
+               pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
+               filp->private_data = req;
+-              if (filp->private_data)
+-                      return 0;
++              return 0;
+       }
+       return -EPERM;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0103-PM-Runtime-Automatically-retry-failed-autosuspends.patch b/patches.runtime_pm/0103-PM-Runtime-Automatically-retry-failed-autosuspends.patch
new file mode 100644 (file)
index 0000000..6b5e7fd
--- /dev/null
@@ -0,0 +1,95 @@
+From 81fce908edabcefa6c4197b6d5e02e566bfaabcc Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Thu, 3 Nov 2011 23:39:18 +0100
+Subject: PM / Runtime: Automatically retry failed autosuspends
+
+Originally, the runtime PM core would send an idle notification
+whenever a suspend attempt failed.  The idle callback routine could
+then schedule a delayed suspend for some time later.
+
+However this behavior was changed by commit
+f71648d73c1650b8b4aceb3856bebbde6daa3b86 (PM / Runtime: Remove idle
+notification after failing suspend).  No notifications were sent, and
+there was no clear mechanism to retry failed suspends.
+
+This caused problems for the usbhid driver, because it fails
+autosuspend attempts as long as a key is being held down.  Therefore
+this patch (as1492) adds a mechanism for retrying failed
+autosuspends.  If the callback routine updates the last_busy field so
+that the next autosuspend expiration time is in the future, the
+autosuspend will automatically be rescheduled.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Tested-by: Henrik Rydberg <rydberg@euromail.se>
+Cc: <stable@kernel.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 886486b792e4f6f96d4fbe8ec5bf20811cab7d6a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   10 ++++++++++
+ drivers/base/power/runtime.c       |   18 ++++++++++++++++--
+ 2 files changed, 26 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 0e85608..5336149 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -789,6 +789,16 @@ will behave normally, not taking the autosuspend delay into account.
+ Similarly, if the power.use_autosuspend field isn't set then the autosuspend
+ helper functions will behave just like the non-autosuspend counterparts.
++Under some circumstances a driver or subsystem may want to prevent a device
++from autosuspending immediately, even though the usage counter is zero and the
++autosuspend delay time has expired.  If the ->runtime_suspend() callback
++returns -EAGAIN or -EBUSY, and if the next autosuspend delay expiration time is
++in the future (as it normally would be if the callback invoked
++pm_runtime_mark_last_busy()), the PM core will automatically reschedule the
++autosuspend.  The ->runtime_suspend() callback can't do this rescheduling
++itself because no suspend requests of any kind are accepted while the device is
++suspending (i.e., while the callback is running).
++
+ The implementation is well suited for asynchronous use in interrupt contexts.
+ However such use inevitably involves races, because the PM core can't
+ synchronize ->runtime_suspend() callbacks with the arrival of I/O requests.
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index e8a5172..8c78443 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -294,6 +294,9 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+  * the callback was running then carry it out, otherwise send an idle
+  * notification for its parent (if the suspend succeeded and both
+  * ignore_children of parent->power and irq_safe of dev->power are not set).
++ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
++ * flag is set and the next autosuspend-delay expiration time is in the
++ * future, schedule another autosuspend attempt.
+  *
+  * This function must be called under dev->power.lock with interrupts disabled.
+  */
+@@ -414,10 +417,21 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       if (retval) {
+               __update_runtime_status(dev, RPM_ACTIVE);
+               dev->power.deferred_resume = false;
+-              if (retval == -EAGAIN || retval == -EBUSY)
++              if (retval == -EAGAIN || retval == -EBUSY) {
+                       dev->power.runtime_error = 0;
+-              else
++
++                      /*
++                       * If the callback routine failed an autosuspend, and
++                       * if the last_busy time has been updated so that there
++                       * is a new autosuspend expiration time, automatically
++                       * reschedule another autosuspend.
++                       */
++                      if ((rpmflags & RPM_AUTO) &&
++                          pm_runtime_autosuspend_expiration(dev) != 0)
++                              goto repeat;
++              } else {
+                       pm_runtime_cancel_pending(dev);
++              }
+               wake_up_all(&dev->power.wait_queue);
+               goto out;
+       }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0104-PM-QoS-Set-cpu_dma_pm_qos-name.patch b/patches.runtime_pm/0104-PM-QoS-Set-cpu_dma_pm_qos-name.patch
new file mode 100644 (file)
index 0000000..5d51c5e
--- /dev/null
@@ -0,0 +1,35 @@
+From 01ee19f4a4b983cc9694e6faec1a3a97bc53d8dc Mon Sep 17 00:00:00 2001
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Sun, 6 Nov 2011 21:54:12 +0100
+Subject: PM / QoS: Set cpu_dma_pm_qos->name
+
+Since commit 4a31a334, the name of this misc device is not initialized,
+which leads to a funny device named /dev/(null) being created and
+/proc/misc containing an entry with just a number but no name. The latter
+leads to complaints by cryptsetup, which caused me to investigate this
+matter.
+
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit a6f05b97d1ba87326bd96f3da9fef994830d6994)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/qos.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 56db751..995e3bd 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -70,6 +70,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
+ };
+ static struct pm_qos_object cpu_dma_pm_qos = {
+       .constraints = &cpu_dma_constraints,
++      .name = "cpu_dma_latency",
+ };
+ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0105-PM-OPP-Use-ERR_CAST-instead-of-ERR_PTR-PTR_ERR.patch b/patches.runtime_pm/0105-PM-OPP-Use-ERR_CAST-instead-of-ERR_PTR-PTR_ERR.patch
new file mode 100644 (file)
index 0000000..3b1adf1
--- /dev/null
@@ -0,0 +1,38 @@
+From b12440733cbf622189c4366b4f1d568fddc6b797 Mon Sep 17 00:00:00 2001
+From: Thomas Meyer <thomas@m3y3r.de>
+Date: Tue, 8 Nov 2011 22:34:00 +0100
+Subject: PM / OPP: Use ERR_CAST instead of ERR_PTR(PTR_ERR())
+
+Use ERR_CAST inlined function instead of ERR_PTR(PTR_ERR(...))
+
+[The semantic patch that makes this change is available
+ in scripts/coccinelle/api/err_cast.cocci.
+
+ More information about semantic patching is available at
+ http://coccinelle.lip6.fr/]
+
+Signed-off-by: Thomas Meyer <thomas@m3y3r.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 156acb166ea9a43d7fcdf9b8051694ce4e91dbfc)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/opp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
+index 434a6c0..95706fa 100644
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+       struct device_opp *dev_opp = find_device_opp(dev);
+       if (IS_ERR(dev_opp))
+-              return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
++              return ERR_CAST(dev_opp); /* matching type */
+       return &dev_opp->head;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0106-PM-Clocks-Only-disable-enabled-clocks-in-pm_clk_susp.patch b/patches.runtime_pm/0106-PM-Clocks-Only-disable-enabled-clocks-in-pm_clk_susp.patch
new file mode 100644 (file)
index 0000000..85d2f54
--- /dev/null
@@ -0,0 +1,44 @@
+From 181672b515b7c74537c40aba503d6aeff154471c Mon Sep 17 00:00:00 2001
+From: Magnus Damm <damm@opensource.se>
+Date: Thu, 10 Nov 2011 00:44:10 +0100
+Subject: PM / Clocks: Only disable enabled clocks in pm_clk_suspend()
+
+Refrain from running clk_disable() on clocks that
+have not been enabled. A typical case when this can
+happen is during Suspend-to-RAM for devices that have
+no driver associated with them. In such case the clock
+may be in default ACQUIRED state.
+
+Without this patch the sh7372 Mackerel board crashes
+in __clk_disable() during Suspend-to-RAM with:
+"Trying to disable clock 0xdeadbeef with 0 usecount"
+This happens for the CEU device which is added during
+boot. The test case has no CEU driver included in the
+kernel configuration. Needed for v3.2-rc1.
+
+Signed-off-by: Magnus Damm <damm@opensource.se>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 24050956e029a9ecff096e1992869ada4492963c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/clock_ops.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index 5f0f85d..428e55e 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev)
+       list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+               if (ce->status < PCE_STATUS_ERROR) {
+-                      clk_disable(ce->clk);
++                      if (ce->status == PCE_STATUS_ENABLED)
++                              clk_disable(ce->clk);
+                       ce->status = PCE_STATUS_ACQUIRED;
+               }
+       }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0107-PM-QoS-Properly-use-the-WARN-macro-in-dev_pm_qos_add.patch b/patches.runtime_pm/0107-PM-QoS-Properly-use-the-WARN-macro-in-dev_pm_qos_add.patch
new file mode 100644 (file)
index 0000000..2b3853e
--- /dev/null
@@ -0,0 +1,66 @@
+From 7c68936add8d8da87bbbc2f2574c38a273e7f1c3 Mon Sep 17 00:00:00 2001
+From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Date: Thu, 10 Nov 2011 00:44:18 +0100
+Subject: PM / QoS: Properly use the WARN() macro in dev_pm_qos_add_request()
+
+Make dev_pm_qos_add_request() use WARN() in a better way and do not hardcode
+the function's name into the message (use __func__ instead).
+
+Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit af4c720efc0507e01b89774fed936087baac4107)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/qos.c |   18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index 30a94ea..86de6c5 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+       if (!dev || !req) /*guard against callers passing in null */
+               return -EINVAL;
+-      if (dev_pm_qos_request_active(req)) {
+-              WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
+-                      "added request\n");
++      if (WARN(dev_pm_qos_request_active(req),
++               "%s() called for already added request\n", __func__))
+               return -EINVAL;
+-      }
+       req->dev = dev;
+@@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+       if (!req) /*guard against callers passing in null */
+               return -EINVAL;
+-      if (!dev_pm_qos_request_active(req)) {
+-              WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
+-                      "unknown object\n");
++      if (WARN(!dev_pm_qos_request_active(req),
++               "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+-      }
+       mutex_lock(&dev_pm_qos_mtx);
+@@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+       if (!req) /*guard against callers passing in null */
+               return -EINVAL;
+-      if (!dev_pm_qos_request_active(req)) {
+-              WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
+-                      "unknown object\n");
++      if (WARN(!dev_pm_qos_request_active(req),
++               "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+-      }
+       mutex_lock(&dev_pm_qos_mtx);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0108-PM-Sleep-Do-not-extend-wakeup-paths-to-devices-with-.patch b/patches.runtime_pm/0108-PM-Sleep-Do-not-extend-wakeup-paths-to-devices-with-.patch
new file mode 100644 (file)
index 0000000..217c62d
--- /dev/null
@@ -0,0 +1,115 @@
+From 5e0e8b7ed287d81ca9e16b607a3440ba160cf871 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 17 Nov 2011 21:39:33 +0100
+Subject: PM Sleep: Do not extend wakeup paths to devices with ignore_children
+ set
+
+Commit 4ca46ff3e0d8c234cb40ebb6457653b59584426c (PM / Sleep: Mark
+devices involved in wakeup signaling during suspend) introduced
+the power.wakeup_path field in struct dev_pm_info to mark devices
+whose children are enabled to wake up the system from sleep states,
+so that power domains containing the parents that provide their
+children with wakeup power and/or relay their wakeup signals are not
+turned off.  Unfortunately, that introduced a PM regression on SH7372
+whose power consumption in the system "memory sleep" state increased
+as a result of it, because it prevented the power domain containing
+the I2C controller from being turned off when some children of that
+controller were enabled to wake up the system, although the
+controller was not necessary for them to signal wakeup.
+
+To fix this issue use the observation that devices whose
+power.ignore_children flag is set for runtime PM should be treated
+analogously during system suspend.  Namely, they shouldn't be
+included in wakeup paths going through their children.  Since the
+SH7372 I2C controller's power.ignore_children flag is set, doing so
+will restore the previous behavior of that SOC.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 8b258cc8ac229aa7d5dcb7cc34cb35d9124498ac)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/main.c  |    3 ++-
+ include/linux/device.h     |    5 +++++
+ include/linux/pm.h         |    2 +-
+ include/linux/pm_runtime.h |    6 ------
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 7fa0984..c3d2dfc 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+  End:
+       if (!error) {
+               dev->power.is_suspended = true;
+-              if (dev->power.wakeup_path && dev->parent)
++              if (dev->power.wakeup_path
++                  && dev->parent && !dev->parent->power.ignore_children)
+                       dev->parent->power.wakeup_path = true;
+       }
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 4980cc0..e483598 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -743,6 +743,11 @@ static inline bool device_async_suspend_enabled(struct device *dev)
+       return !!dev->power.async_suspend;
+ }
++static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
++{
++      dev->power.ignore_children = enable;
++}
++
+ static inline void device_lock(struct device *dev)
+ {
+       mutex_lock(&dev->mutex);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index f15acb6..5c4c8b1 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -447,6 +447,7 @@ struct dev_pm_info {
+       unsigned int            async_suspend:1;
+       bool                    is_prepared:1;  /* Owned by the PM core */
+       bool                    is_suspended:1; /* Ditto */
++      bool                    ignore_children:1;
+       spinlock_t              lock;
+ #ifdef CONFIG_PM_SLEEP
+       struct list_head        entry;
+@@ -464,7 +465,6 @@ struct dev_pm_info {
+       atomic_t                usage_count;
+       atomic_t                child_count;
+       unsigned int            disable_depth:3;
+-      unsigned int            ignore_children:1;
+       unsigned int            idle_notification:1;
+       unsigned int            request_pending:1;
+       unsigned int            deferred_resume:1;
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index d8d9036..d3085e7 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -52,11 +52,6 @@ static inline bool pm_children_suspended(struct device *dev)
+               || !atomic_read(&dev->power.child_count);
+ }
+-static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+-{
+-      dev->power.ignore_children = enable;
+-}
+-
+ static inline void pm_runtime_get_noresume(struct device *dev)
+ {
+       atomic_inc(&dev->power.usage_count);
+@@ -130,7 +125,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
+ static inline void pm_runtime_forbid(struct device *dev) {}
+ static inline bool pm_children_suspended(struct device *dev) { return false; }
+-static inline void pm_suspend_ignore_children(struct device *dev, bool en) {}
+ static inline void pm_runtime_get_noresume(struct device *dev) {}
+ static inline void pm_runtime_put_noidle(struct device *dev) {}
+ static inline bool device_run_wake(struct device *dev) { return false; }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0109-PM-Hibernate-Fix-the-early-termination-of-test-modes.patch b/patches.runtime_pm/0109-PM-Hibernate-Fix-the-early-termination-of-test-modes.patch
new file mode 100644 (file)
index 0000000..f3616b7
--- /dev/null
@@ -0,0 +1,81 @@
+From 9bb67d0418b0358569732bce9ac4778975194b4d Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Fri, 18 Nov 2011 23:02:42 +0100
+Subject: PM / Hibernate: Fix the early termination of test modes
+
+Commit 2aede851ddf08666f68ffc17be446420e9d2a056
+(PM / Hibernate: Freeze kernel threads after preallocating memory)
+postponed the freezing of kernel threads to after preallocating memory
+for hibernation. But while doing that, the hibernation test TEST_FREEZER
+and the test mode HIBERNATION_TESTPROC were not moved accordingly.
+
+As a result, when using these test modes, it only goes upto the freezing of
+userspace and exits, when in fact it should go till the complete end of task
+freezing stage, namely the freezing of kernel threads as well.
+
+So, move these points of exit to appropriate places so that freezing of
+kernel threads is also tested while using these test harnesses.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit aa9a7b11821e883a7b93ecce190881e0ea48648b)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 3987d43..62c887e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -55,6 +55,8 @@ enum {
+ static int hibernation_mode = HIBERNATION_SHUTDOWN;
++static bool freezer_test_done;
++
+ static const struct platform_hibernation_ops *hibernation_ops;
+ /**
+@@ -347,6 +349,17 @@ int hibernation_snapshot(int platform_mode)
+       if (error)
+               goto Close;
++      if (hibernation_test(TEST_FREEZER) ||
++              hibernation_testmode(HIBERNATION_TESTPROC)) {
++
++              /*
++               * Indicate to the caller that we are returning due to a
++               * successful freezer test.
++               */
++              freezer_test_done = true;
++              goto Close;
++      }
++
+       error = dpm_prepare(PMSG_FREEZE);
+       if (error)
+               goto Complete_devices;
+@@ -641,15 +654,13 @@ int hibernate(void)
+       if (error)
+               goto Free_bitmaps;
+-      if (hibernation_test(TEST_FREEZER))
+-              goto Thaw;
+-
+-      if (hibernation_testmode(HIBERNATION_TESTPROC))
+-              goto Thaw;
+-
+       error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
+       if (error)
+               goto Thaw;
++      if (freezer_test_done) {
++              freezer_test_done = false;
++              goto Thaw;
++      }
+       if (in_suspend) {
+               unsigned int flags = 0;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0110-PM-Suspend-Fix-bug-in-suspend-statistics-update.patch b/patches.runtime_pm/0110-PM-Suspend-Fix-bug-in-suspend-statistics-update.patch
new file mode 100644 (file)
index 0000000..0ea8969
--- /dev/null
@@ -0,0 +1,45 @@
+From 9457d19470ed5fa150914c65e231262337224726 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Sat, 19 Nov 2011 14:37:57 +0100
+Subject: PM / Suspend: Fix bug in suspend statistics update
+
+After commit 2a77c46de1e3dace73745015635ebbc648eca69c
+(PM / Suspend: Add statistics debugfs file for suspend to RAM)
+a missing pair of braces inside the state_store() function causes even
+invalid arguments to suspend to be wrongly treated as failed suspend
+attempts. Fix this.
+
+[rjw: Put the hash/subject of the buggy commit into the changelog.]
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 501a708f18ef911328ffd39f39738b8a7862aa8e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/main.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 71f49fe..36e0f09 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -290,13 +290,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+               if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
+                       break;
+       }
+-      if (state < PM_SUSPEND_MAX && *s)
++      if (state < PM_SUSPEND_MAX && *s) {
+               error = enter_state(state);
+               if (error) {
+                       suspend_stats.fail++;
+                       dpm_save_failed_errno(error);
+               } else
+                       suspend_stats.success++;
++      }
+ #endif
+  Exit:
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0111-freezer-don-t-unnecessarily-set-PF_NOFREEZE-explicit.patch b/patches.runtime_pm/0111-freezer-don-t-unnecessarily-set-PF_NOFREEZE-explicit.patch
new file mode 100644 (file)
index 0000000..ccc2837
--- /dev/null
@@ -0,0 +1,86 @@
+From 401b933116a892dc8fe3e39ae7c1aec7ea94b628 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:22 -0800
+Subject: freezer: don't unnecessarily set PF_NOFREEZE explicitly
+
+Some drivers set PF_NOFREEZE in their kthread functions which is
+completely unnecessary and racy - some part of freezer code doesn't
+consider cases where PF_NOFREEZE is set asynchronous to freezer
+operations.
+
+In general, there's no reason to allow setting PF_NOFREEZE explicitly.
+Remove them and change the documentation to note that setting
+PF_NOFREEZE directly isn't allowed.
+
+-v2: Dropped change to twl4030-irq.c as it no longer uses PF_NOFREEZE.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: "Gustavo F. Padovan" <padovan@profusion.mobi>
+Acked-by: Samuel Ortiz <sameo@linux.intel.com>
+Cc: Marcel Holtmann <marcel@holtmann.org>
+Cc: wwang <wei_wang@realsil.com.cn>
+(cherry picked from commit 3a7cbd50f74907580eb47a8d08e1f29741b81abf)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/freezing-of-tasks.txt |    2 +-
+ drivers/bluetooth/btmrvl_main.c           |    2 --
+ drivers/mfd/twl6030-irq.c                 |    2 --
+ drivers/staging/rts_pstor/rtsx.c          |    2 --
+ 4 files changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
+index 316c2ba..587e082 100644
+--- a/Documentation/power/freezing-of-tasks.txt
++++ b/Documentation/power/freezing-of-tasks.txt
+@@ -67,7 +67,7 @@ III. Which kernel threads are freezable?
+ Kernel threads are not freezable by default.  However, a kernel thread may clear
+ PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
+-directly is strongly discouraged).  From this point it is regarded as freezable
++directly is not allowed).  From this point it is regarded as freezable
+ and must call try_to_freeze() in a suitable place.
+ IV. Why do we do that?
+diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
+index 548d1d9..57312d4 100644
+--- a/drivers/bluetooth/btmrvl_main.c
++++ b/drivers/bluetooth/btmrvl_main.c
+@@ -473,8 +473,6 @@ static int btmrvl_service_main_thread(void *data)
+       init_waitqueue_entry(&wait, current);
+-      current->flags |= PF_NOFREEZE;
+-
+       for (;;) {
+               add_wait_queue(&thread->wait_q, &wait);
+diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
+index b0563b6..71990f9 100644
+--- a/drivers/mfd/twl6030-irq.c
++++ b/drivers/mfd/twl6030-irq.c
+@@ -96,8 +96,6 @@ static int twl6030_irq_thread(void *data)
+       static const unsigned max_i2c_errors = 100;
+       int ret;
+-      current->flags |= PF_NOFREEZE;
+-
+       while (!kthread_should_stop()) {
+               int i;
+               union {
+diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
+index 5ff59f2..2f35832 100644
+--- a/drivers/staging/rts_pstor/rtsx.c
++++ b/drivers/staging/rts_pstor/rtsx.c
+@@ -472,8 +472,6 @@ static int rtsx_control_thread(void *__dev)
+       struct rtsx_chip *chip = dev->chip;
+       struct Scsi_Host *host = rtsx_to_host(dev);
+-      current->flags |= PF_NOFREEZE;
+-
+       for (;;) {
+               if (down_interruptible(&dev->sema))
+                       break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0112-freezer-fix-current-state-restoration-race-in-refrig.patch b/patches.runtime_pm/0112-freezer-fix-current-state-restoration-race-in-refrig.patch
new file mode 100644 (file)
index 0000000..cf53d4c
--- /dev/null
@@ -0,0 +1,57 @@
+From 080f92ef995d70e0acc2c5cfa99d52a705b811c3 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:22 -0800
+Subject: freezer: fix current->state restoration race in refrigerator()
+
+refrigerator() saves current->state before entering frozen state and
+restores it before returning using __set_current_state(); however,
+this is racy, for example, please consider the following sequence.
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       try_to_freeze();
+       if (kthread_should_stop())
+               break;
+       schedule();
+
+If kthread_stop() races with ->state restoration, the restoration can
+restore ->state to TASK_INTERRUPTIBLE after kthread_stop() sets it to
+TASK_RUNNING but kthread_should_stop() may still see zero
+->should_stop because there's no memory barrier between restoring
+TASK_INTERRUPTIBLE and kthread_should_stop() test.
+
+This isn't restricted to kthread_should_stop().  current->state is
+often used in memory barrier based synchronization and silently
+restoring it w/o mb breaks them.
+
+Use set_current_state() instead.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+(cherry picked from commit 50fb4f7fc907efff65eadb0b74387a9ffed6e849)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/freezer.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 7b01de9..575f863 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -58,7 +58,13 @@ void refrigerator(void)
+       current->flags &= ~PF_FREEZING;
+       pr_debug("%s left refrigerator\n", current->comm);
+-      __set_current_state(save);
++
++      /*
++       * Restore saved task state before returning.  The mb'd version
++       * needs to be used; otherwise, it might silently break
++       * synchronization which depends on ordered task state change.
++       */
++      set_current_state(save);
+ }
+ EXPORT_SYMBOL(refrigerator);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0113-freezer-unexport-refrigerator-and-update-try_to_free.patch b/patches.runtime_pm/0113-freezer-unexport-refrigerator-and-update-try_to_free.patch
new file mode 100644 (file)
index 0000000..90922af
--- /dev/null
@@ -0,0 +1,369 @@
+From b94764b22361f331890c56fe2f0f278d82f0eff1 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:22 -0800
+Subject: freezer: unexport refrigerator() and update try_to_freeze() slightly
+
+There is no reason to export two functions for entering the
+refrigerator.  Calling refrigerator() instead of try_to_freeze()
+doesn't save anything noticeable or removes any race condition.
+
+* Rename refrigerator() to __refrigerator() and make it return bool
+  indicating whether it scheduled out for freezing.
+
+* Update try_to_freeze() to return bool and relay the return value of
+  __refrigerator() if freezing().
+
+* Convert all refrigerator() users to try_to_freeze().
+
+* Update documentation accordingly.
+
+* While at it, add might_sleep() to try_to_freeze().
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Samuel Ortiz <samuel@sortiz.org>
+Cc: Chris Mason <chris.mason@oracle.com>
+Cc: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Steven Whitehouse <swhiteho@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
+Cc: Christoph Hellwig <hch@infradead.org>
+(cherry picked from commit a0acae0e886d44bd5ce6d2f173c1ace0fcf0d9f6)
+
+Conflicts:
+
+       fs/xfs/xfs_buf.c
+       fs/xfs/linux-2.6/xfs_buf.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/freezing-of-tasks.txt |   12 ++++++------
+ drivers/net/irda/stir4200.c               |    2 +-
+ fs/btrfs/async-thread.c                   |    2 +-
+ fs/btrfs/disk-io.c                        |    8 ++------
+ fs/ext4/super.c                           |    3 +--
+ fs/gfs2/log.c                             |    4 ++--
+ fs/gfs2/quota.c                           |    4 ++--
+ fs/jbd/journal.c                          |    2 +-
+ fs/jbd2/journal.c                         |    2 +-
+ fs/jfs/jfs_logmgr.c                       |    2 +-
+ fs/jfs/jfs_txnmgr.c                       |    4 ++--
+ fs/nilfs2/segment.c                       |    2 +-
+ fs/xfs/linux-2.6/xfs_buf.c                |    2 +-
+ include/linux/freezer.h                   |   17 ++++++++---------
+ kernel/freezer.c                          |   10 +++++++---
+ 15 files changed, 37 insertions(+), 39 deletions(-)
+
+diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
+index 587e082..3ab9fbd 100644
+--- a/Documentation/power/freezing-of-tasks.txt
++++ b/Documentation/power/freezing-of-tasks.txt
+@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called.  It executes
+ try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
+ either wakes them up, if they are kernel threads, or sends fake signals to them,
+ if they are user space processes.  A task that has TIF_FREEZE set, should react
+-to it by calling the function called refrigerator() (defined in
++to it by calling the function called __refrigerator() (defined in
+ kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
+ to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
+ Then, we say that the task is 'frozen' and therefore the set of functions
+@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
+ defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
+ User space processes are generally frozen before kernel threads.
+-It is not recommended to call refrigerator() directly.  Instead, it is
+-recommended to use the try_to_freeze() function (defined in
+-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
+-task enter refrigerator() if the flag is set.
++__refrigerator() must not be called directly.  Instead, use the
++try_to_freeze() function (defined in include/linux/freezer.h), that checks
++the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
++flag is set.
+ For user space processes try_to_freeze() is called automatically from the
+ signal-handling code, but the freezable kernel threads need to call it
+@@ -61,7 +61,7 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
+ After the system memory state has been restored from a hibernation image and
+ devices have been reinitialized, the function thaw_processes() is called in
+ order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
+-have been frozen leave refrigerator() and continue running.
++have been frozen leave __refrigerator() and continue running.
+ III. Which kernel threads are freezable?
+diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
+index 41c96b3..e880c79 100644
+--- a/drivers/net/irda/stir4200.c
++++ b/drivers/net/irda/stir4200.c
+@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
+                       write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
+-                      refrigerator();
++                      try_to_freeze();
+                       if (change_speed(stir, stir->speed))
+                               break;
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 7ec1409..98ab240 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -340,7 +340,7 @@ again:
+               if (freezing(current)) {
+                       worker->working = 0;
+                       spin_unlock_irq(&worker->lock);
+-                      refrigerator();
++                      try_to_freeze();
+               } else {
+                       spin_unlock_irq(&worker->lock);
+                       if (!kthread_should_stop()) {
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 1ac8db5d..66f4b9e 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1479,9 +1479,7 @@ static int cleaner_kthread(void *arg)
+                       btrfs_run_defrag_inodes(root->fs_info);
+               }
+-              if (freezing(current)) {
+-                      refrigerator();
+-              } else {
++              if (!try_to_freeze()) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       if (!kthread_should_stop())
+                               schedule();
+@@ -1535,9 +1533,7 @@ sleep:
+               wake_up_process(root->fs_info->cleaner_kthread);
+               mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+-              if (freezing(current)) {
+-                      refrigerator();
+-              } else {
++              if (!try_to_freeze()) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       if (!kthread_should_stop() &&
+                           !btrfs_transaction_blocked(root->fs_info))
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index df121b2..8de0a54 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2820,8 +2820,7 @@ cont_thread:
+               }
+               mutex_unlock(&eli->li_list_mtx);
+-              if (freezing(current))
+-                      refrigerator();
++              try_to_freeze();
+               cur = jiffies;
+               if ((time_after_eq(cur, next_wakeup)) ||
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 85c6292..b342c71 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
+                       wake_up(&sdp->sd_log_waitq);
+               t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
+-              if (freezing(current))
+-                      refrigerator();
++
++              try_to_freeze();
+               do {
+                       prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 42e8d23..2ccaaac 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -1431,8 +1431,8 @@ int gfs2_quotad(void *data)
+               /* Check for & recover partially truncated inodes */
+               quotad_check_trunc_list(sdp);
+-              if (freezing(current))
+-                      refrigerator();
++              try_to_freeze();
++
+               t = min(quotad_timeo, statfs_timeo);
+               prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
+diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
+index 9f36384..b889c55 100644
+--- a/fs/jbd/journal.c
++++ b/fs/jbd/journal.c
+@@ -163,7 +163,7 @@ loop:
+                */
+               jbd_debug(1, "Now suspending kjournald\n");
+               spin_unlock(&journal->j_state_lock);
+-              refrigerator();
++              try_to_freeze();
+               spin_lock(&journal->j_state_lock);
+       } else {
+               /*
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 40c5fb7..c7b7079 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -173,7 +173,7 @@ loop:
+                */
+               jbd_debug(1, "Now suspending kjournald2\n");
+               write_unlock(&journal->j_state_lock);
+-              refrigerator();
++              try_to_freeze();
+               write_lock(&journal->j_state_lock);
+       } else {
+               /*
+diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
+index 583636f..c8c2d35 100644
+--- a/fs/jfs/jfs_logmgr.c
++++ b/fs/jfs/jfs_logmgr.c
+@@ -2348,7 +2348,7 @@ int jfsIOWait(void *arg)
+               if (freezing(current)) {
+                       spin_unlock_irq(&log_redrive_lock);
+-                      refrigerator();
++                      try_to_freeze();
+               } else {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       spin_unlock_irq(&log_redrive_lock);
+diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
+index f6cc0c0..8a2f83b 100644
+--- a/fs/jfs/jfs_txnmgr.c
++++ b/fs/jfs/jfs_txnmgr.c
+@@ -2801,7 +2801,7 @@ int jfs_lazycommit(void *arg)
+               if (freezing(current)) {
+                       LAZY_UNLOCK(flags);
+-                      refrigerator();
++                      try_to_freeze();
+               } else {
+                       DECLARE_WAITQUEUE(wq, current);
+@@ -2996,7 +2996,7 @@ int jfs_sync(void *arg)
+               if (freezing(current)) {
+                       TXN_UNLOCK();
+-                      refrigerator();
++                      try_to_freeze();
+               } else {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       TXN_UNLOCK();
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index bb24ab6..0e72ad6 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
+       if (freezing(current)) {
+               spin_unlock(&sci->sc_state_lock);
+-              refrigerator();
++              try_to_freeze();
+               spin_lock(&sci->sc_state_lock);
+       } else {
+               DEFINE_WAIT(wait);
+diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
+index 5e68099..5930e87b 100644
+--- a/fs/xfs/linux-2.6/xfs_buf.c
++++ b/fs/xfs/linux-2.6/xfs_buf.c
+@@ -1764,7 +1764,7 @@ xfsbufd(
+               if (unlikely(freezing(current))) {
+                       set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
+-                      refrigerator();
++                      try_to_freeze();
+               } else {
+                       clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
+               }
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index aa56cf3..eb9c977 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -47,18 +47,17 @@ static inline bool should_send_signal(struct task_struct *p)
+ /* Takes and releases task alloc lock using task_lock() */
+ extern int thaw_process(struct task_struct *p);
+-extern void refrigerator(void);
++extern bool __refrigerator(void);
+ extern int freeze_processes(void);
+ extern int freeze_kernel_threads(void);
+ extern void thaw_processes(void);
+-static inline int try_to_freeze(void)
++static inline bool try_to_freeze(void)
+ {
+-      if (freezing(current)) {
+-              refrigerator();
+-              return 1;
+-      } else
+-              return 0;
++      might_sleep();
++      if (likely(!freezing(current)))
++              return false;
++      return __refrigerator();
+ }
+ extern bool freeze_task(struct task_struct *p, bool sig_only);
+@@ -171,12 +170,12 @@ static inline void set_freeze_flag(struct task_struct *p) {}
+ static inline void clear_freeze_flag(struct task_struct *p) {}
+ static inline int thaw_process(struct task_struct *p) { return 1; }
+-static inline void refrigerator(void) {}
++static inline bool __refrigerator(void) { return false; }
+ static inline int freeze_processes(void) { return -ENOSYS; }
+ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
+ static inline void thaw_processes(void) {}
+-static inline int try_to_freeze(void) { return 0; }
++static inline bool try_to_freeze(void) { return false; }
+ static inline void freezer_do_not_count(void) {}
+ static inline void freezer_count(void) {}
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 575f863..4d59904 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -23,10 +23,11 @@ static inline void frozen_process(void)
+ }
+ /* Refrigerator is place where frozen processes are stored :-). */
+-void refrigerator(void)
++bool __refrigerator(void)
+ {
+       /* Hmm, should we be allowed to suspend when there are realtime
+          processes around? */
++      bool was_frozen = false;
+       long save;
+       task_lock(current);
+@@ -35,7 +36,7 @@ void refrigerator(void)
+               task_unlock(current);
+       } else {
+               task_unlock(current);
+-              return;
++              return was_frozen;
+       }
+       save = current->state;
+       pr_debug("%s entered refrigerator\n", current->comm);
+@@ -51,6 +52,7 @@ void refrigerator(void)
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (!frozen(current))
+                       break;
++              was_frozen = true;
+               schedule();
+       }
+@@ -65,8 +67,10 @@ void refrigerator(void)
+        * synchronization which depends on ordered task state change.
+        */
+       set_current_state(save);
++
++      return was_frozen;
+ }
+-EXPORT_SYMBOL(refrigerator);
++EXPORT_SYMBOL(__refrigerator);
+ static void fake_signal_wake_up(struct task_struct *p)
+ {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0114-oom-thaw-threads-if-oom-killed-thread-is-frozen-befo.patch b/patches.runtime_pm/0114-oom-thaw-threads-if-oom-killed-thread-is-frozen-befo.patch
new file mode 100644 (file)
index 0000000..2e17b60
--- /dev/null
@@ -0,0 +1,53 @@
+From f792e47c3aef0e6346cffbf2c578e4cc9a3e9490 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Mon, 31 Oct 2011 17:07:07 -0700
+Subject: oom: thaw threads if oom killed thread is frozen before deferring
+
+If a thread has been oom killed and is frozen, thaw it before returning to
+the page allocator.  Otherwise, it can stay frozen indefinitely and no
+memory will be freed.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit f660daac474c6f7c2d710100e29b3276a6f4db0a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ mm/oom_kill.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 7c72487..d00e922 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -32,6 +32,7 @@
+ #include <linux/mempolicy.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/freezer.h>
+ int sysctl_panic_on_oom;
+ int sysctl_oom_kill_allocating_task;
+@@ -317,8 +318,11 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
+                * blocked waiting for another task which itself is waiting
+                * for memory. Is there a better alternative?
+                */
+-              if (test_tsk_thread_flag(p, TIF_MEMDIE))
++              if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
++                      if (unlikely(frozen(p)))
++                              thaw_process(p);
+                       return ERR_PTR(-1UL);
++              }
+               if (!p->mm)
+                       continue;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0115-freezer-implement-and-use-kthread_freezable_should_s.patch b/patches.runtime_pm/0115-freezer-implement-and-use-kthread_freezable_should_s.patch
new file mode 100644 (file)
index 0000000..bbf7e99
--- /dev/null
@@ -0,0 +1,239 @@
+From 0f56da2ccf95afff53f0fe299bad8101f817eee4 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:23 -0800
+Subject: freezer: implement and use kthread_freezable_should_stop()
+
+Writeback and thinkpad_acpi have been using thaw_process() to prevent
+deadlock between the freezer and kthread_stop(); unfortunately, this
+is inherently racy - nothing prevents freezing from happening between
+thaw_process() and kthread_stop().
+
+This patch implements kthread_freezable_should_stop() which enters
+refrigerator if necessary but is guaranteed to return if
+kthread_stop() is invoked.  Both thaw_process() users are converted to
+use the new function.
+
+Note that this deadlock condition exists for many of freezable
+kthreads.  They need to be converted to use the new should_stop or
+freezable workqueue.
+
+Tested with synthetic test case.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Oleg Nesterov <oleg@redhat.com>
+(cherry picked from commit 8a32c441c1609f80e55df75422324a1151208f40)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/platform/x86/thinkpad_acpi.c |   15 ++++++---------
+ fs/fs-writeback.c                    |    4 +---
+ include/linux/freezer.h              |    6 +++---
+ include/linux/kthread.h              |    1 +
+ kernel/freezer.c                     |    6 ++++--
+ kernel/kthread.c                     |   25 +++++++++++++++++++++++++
+ mm/backing-dev.c                     |    8 ++------
+ 7 files changed, 42 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 26c5b11..b7dddbb 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
+       u32 poll_mask, event_mask;
+       unsigned int si, so;
+       unsigned long t;
+-      unsigned int change_detector, must_reset;
++      unsigned int change_detector;
+       unsigned int poll_freq;
++      bool was_frozen;
+       mutex_lock(&hotkey_thread_mutex);
+@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
+                               t = 100;        /* should never happen... */
+               }
+               t = msleep_interruptible(t);
+-              if (unlikely(kthread_should_stop()))
++              if (unlikely(kthread_freezable_should_stop(&was_frozen)))
+                       break;
+-              must_reset = try_to_freeze();
+-              if (t > 0 && !must_reset)
++
++              if (t > 0 && !was_frozen)
+                       continue;
+               mutex_lock(&hotkey_thread_data_mutex);
+-              if (must_reset || hotkey_config_change != change_detector) {
++              if (was_frozen || hotkey_config_change != change_detector) {
+                       /* forget old state on thaw or config change */
+                       si = so;
+                       t = 0;
+@@ -2528,10 +2529,6 @@ exit:
+ static void hotkey_poll_stop_sync(void)
+ {
+       if (tpacpi_hotkey_task) {
+-              if (frozen(tpacpi_hotkey_task) ||
+-                  freezing(tpacpi_hotkey_task))
+-                      thaw_process(tpacpi_hotkey_task);
+-
+               kthread_stop(tpacpi_hotkey_task);
+               tpacpi_hotkey_task = NULL;
+               mutex_lock(&hotkey_thread_mutex);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index fe190a8..4e69320 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -912,7 +912,7 @@ int bdi_writeback_thread(void *data)
+       trace_writeback_thread_start(bdi);
+-      while (!kthread_should_stop()) {
++      while (!kthread_freezable_should_stop(NULL)) {
+               /*
+                * Remove own delayed wake-up timer, since we are already awake
+                * and we'll take care of the preriodic write-back.
+@@ -942,8 +942,6 @@ int bdi_writeback_thread(void *data)
+                        */
+                       schedule();
+               }
+-
+-              try_to_freeze();
+       }
+       /* Flush any work that raced with us exiting */
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index eb9c977..91d19c4 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -47,7 +47,7 @@ static inline bool should_send_signal(struct task_struct *p)
+ /* Takes and releases task alloc lock using task_lock() */
+ extern int thaw_process(struct task_struct *p);
+-extern bool __refrigerator(void);
++extern bool __refrigerator(bool check_kthr_stop);
+ extern int freeze_processes(void);
+ extern int freeze_kernel_threads(void);
+ extern void thaw_processes(void);
+@@ -57,7 +57,7 @@ static inline bool try_to_freeze(void)
+       might_sleep();
+       if (likely(!freezing(current)))
+               return false;
+-      return __refrigerator();
++      return __refrigerator(false);
+ }
+ extern bool freeze_task(struct task_struct *p, bool sig_only);
+@@ -170,7 +170,7 @@ static inline void set_freeze_flag(struct task_struct *p) {}
+ static inline void clear_freeze_flag(struct task_struct *p) {}
+ static inline int thaw_process(struct task_struct *p) { return 1; }
+-static inline bool __refrigerator(void) { return false; }
++static inline bool __refrigerator(bool check_kthr_stop) { return false; }
+ static inline int freeze_processes(void) { return -ENOSYS; }
+ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
+ static inline void thaw_processes(void) {}
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 1e923e5..6c1903d 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+ void kthread_bind(struct task_struct *k, unsigned int cpu);
+ int kthread_stop(struct task_struct *k);
+ int kthread_should_stop(void);
++bool kthread_freezable_should_stop(bool *was_frozen);
+ void *kthread_data(struct task_struct *k);
+ int kthreadd(void *unused);
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 4d59904..656492c 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -9,6 +9,7 @@
+ #include <linux/module.h>
+ #include <linux/syscalls.h>
+ #include <linux/freezer.h>
++#include <linux/kthread.h>
+ /*
+  * freezing is complete, mark current process as frozen
+@@ -23,7 +24,7 @@ static inline void frozen_process(void)
+ }
+ /* Refrigerator is place where frozen processes are stored :-). */
+-bool __refrigerator(void)
++bool __refrigerator(bool check_kthr_stop)
+ {
+       /* Hmm, should we be allowed to suspend when there are realtime
+          processes around? */
+@@ -50,7 +51,8 @@ bool __refrigerator(void)
+       for (;;) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+-              if (!frozen(current))
++              if (!frozen(current) ||
++                  (check_kthr_stop && kthread_should_stop()))
+                       break;
+               was_frozen = true;
+               schedule();
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 4ba7ccc..a6cbeea 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -59,6 +59,31 @@ int kthread_should_stop(void)
+ EXPORT_SYMBOL(kthread_should_stop);
+ /**
++ * kthread_freezable_should_stop - should this freezable kthread return now?
++ * @was_frozen: optional out parameter, indicates whether %current was frozen
++ *
++ * kthread_should_stop() for freezable kthreads, which will enter
++ * refrigerator if necessary.  This function is safe from kthread_stop() /
++ * freezer deadlock and freezable kthreads should use this function instead
++ * of calling try_to_freeze() directly.
++ */
++bool kthread_freezable_should_stop(bool *was_frozen)
++{
++      bool frozen = false;
++
++      might_sleep();
++
++      if (unlikely(freezing(current)))
++              frozen = __refrigerator(true);
++
++      if (was_frozen)
++              *was_frozen = frozen;
++
++      return kthread_should_stop();
++}
++EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
++
++/**
+  * kthread_data - return data value specified on kthread creation
+  * @task: kthread task in question
+  *
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index b3b122f..c0ee0ff 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -578,14 +578,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+       /*
+        * Finally, kill the kernel thread. We don't need to be RCU
+-       * safe anymore, since the bdi is gone from visibility. Force
+-       * unfreeze of the thread before calling kthread_stop(), otherwise
+-       * it would never exet if it is currently stuck in the refrigerator.
++       * safe anymore, since the bdi is gone from visibility.
+        */
+-      if (bdi->wb.task) {
+-              thaw_process(bdi->wb.task);
++      if (bdi->wb.task)
+               kthread_stop(bdi->wb.task);
+-      }
+ }
+ /*
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0116-freezer-rename-thaw_process-to-__thaw_task-and-simpl.patch b/patches.runtime_pm/0116-freezer-rename-thaw_process-to-__thaw_task-and-simpl.patch
new file mode 100644 (file)
index 0000000..2b9ac96
--- /dev/null
@@ -0,0 +1,158 @@
+From 1982fa9532742d5d986ce6fa9c8253e32de89b16 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:23 -0800
+Subject: freezer: rename thaw_process() to __thaw_task() and simplify the
+ implementation
+
+thaw_process() now has only internal users - system and cgroup
+freezers.  Remove the unnecessary return value, rename, unexport and
+collapse __thaw_process() into it.  This will help further updates to
+the freezer code.
+
+-v3: oom_kill grew a use of thaw_process() while this patch was
+     pending.  Convert it to use __thaw_task() for now.  In the longer
+     term, this should be handled by allowing tasks to die if killed
+     even if it's frozen.
+
+-v2: minor style update as suggested by Matt.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Paul Menage <menage@google.com>
+Cc: Matt Helsley <matthltc@us.ibm.com>
+(cherry picked from commit a5be2d0d1a8746e7be5210e3d6b904455000443c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/freezer.h |    3 +--
+ kernel/cgroup_freezer.c |    7 +++----
+ kernel/freezer.c        |   31 ++++++++++++-------------------
+ kernel/power/process.c  |    2 +-
+ mm/oom_kill.c           |    2 +-
+ 5 files changed, 18 insertions(+), 27 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 91d19c4..0d161c5 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -45,7 +45,7 @@ static inline bool should_send_signal(struct task_struct *p)
+ }
+ /* Takes and releases task alloc lock using task_lock() */
+-extern int thaw_process(struct task_struct *p);
++extern void __thaw_task(struct task_struct *t);
+ extern bool __refrigerator(bool check_kthr_stop);
+ extern int freeze_processes(void);
+@@ -168,7 +168,6 @@ static inline int frozen(struct task_struct *p) { return 0; }
+ static inline int freezing(struct task_struct *p) { return 0; }
+ static inline void set_freeze_flag(struct task_struct *p) {}
+ static inline void clear_freeze_flag(struct task_struct *p) {}
+-static inline int thaw_process(struct task_struct *p) { return 1; }
+ static inline bool __refrigerator(bool check_kthr_stop) { return false; }
+ static inline int freeze_processes(void) { return -ENOSYS; }
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index a3f638a..bd03e22 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -130,7 +130,7 @@ struct cgroup_subsys freezer_subsys;
+  *   write_lock css_set_lock (cgroup iterator start)
+  *    task->alloc_lock
+  *   read_lock css_set_lock (cgroup iterator start)
+- *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
++ *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
+  *     sighand->siglock
+  */
+ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
+@@ -307,9 +307,8 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+       struct task_struct *task;
+       cgroup_iter_start(cgroup, &it);
+-      while ((task = cgroup_iter_next(cgroup, &it))) {
+-              thaw_process(task);
+-      }
++      while ((task = cgroup_iter_next(cgroup, &it)))
++              __thaw_task(task);
+       cgroup_iter_end(cgroup, &it);
+       freezer->state = CGROUP_THAWED;
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 656492c..8b88d04 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -145,18 +145,8 @@ void cancel_freezing(struct task_struct *p)
+       }
+ }
+-static int __thaw_process(struct task_struct *p)
+-{
+-      if (frozen(p)) {
+-              p->flags &= ~PF_FROZEN;
+-              return 1;
+-      }
+-      clear_freeze_flag(p);
+-      return 0;
+-}
+-
+ /*
+- * Wake up a frozen process
++ * Wake up a frozen task
+  *
+  * task_lock() is needed to prevent the race with refrigerator() which may
+  * occur if the freezing of tasks fails.  Namely, without the lock, if the
+@@ -164,15 +154,18 @@ static int __thaw_process(struct task_struct *p)
+  * refrigerator() could call frozen_process(), in which case the task would be
+  * frozen and no one would thaw it.
+  */
+-int thaw_process(struct task_struct *p)
++void __thaw_task(struct task_struct *p)
+ {
++      bool was_frozen;
++
+       task_lock(p);
+-      if (__thaw_process(p) == 1) {
+-              task_unlock(p);
+-              wake_up_process(p);
+-              return 1;
+-      }
++      was_frozen = frozen(p);
++      if (was_frozen)
++              p->flags &= ~PF_FROZEN;
++      else
++              clear_freeze_flag(p);
+       task_unlock(p);
+-      return 0;
++
++      if (was_frozen)
++              wake_up_process(p);
+ }
+-EXPORT_SYMBOL(thaw_process);
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index addbbe5..fe27872 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -186,7 +186,7 @@ static void thaw_tasks(bool nosig_only)
+               if (cgroup_freezing_or_frozen(p))
+                       continue;
+-              thaw_process(p);
++              __thaw_task(p);
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+ }
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index d00e922..6b5688f 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -320,7 +320,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
+                */
+               if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
+                       if (unlikely(frozen(p)))
+-                              thaw_process(p);
++                              __thaw_task(p);
+                       return ERR_PTR(-1UL);
+               }
+               if (!p->mm)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0117-freezer-remove-racy-clear_freeze_flag-and-set-PF_NOF.patch b/patches.runtime_pm/0117-freezer-remove-racy-clear_freeze_flag-and-set-PF_NOF.patch
new file mode 100644 (file)
index 0000000..8c459dd
--- /dev/null
@@ -0,0 +1,66 @@
+From 24ddb5b0de8926d6836136d57c54e54f1e791072 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:23 -0800
+Subject: freezer: remove racy clear_freeze_flag() and set PF_NOFREEZE on dead
+ tasks
+
+clear_freeze_flag() in exit_mm() is racy.  Freezing can start
+afterwards.  Remove it.  Skipping freezer for exiting task will be
+properly implemented later.
+
+Also, freezable() was testing exit_state directly to make system
+freezer ignore dead tasks.  Let the exiting task set PF_NOFREEZE after
+entering TASK_DEAD instead.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+(cherry picked from commit a585042f7b933539a0b6bc63650c2d49ffb2e55d)
+
+Conflicts:
+
+       kernel/exit.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/exit.c          |    3 +--
+ kernel/power/process.c |    3 +--
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 303bed2..437d0cf 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -693,8 +693,6 @@ static void exit_mm(struct task_struct * tsk)
+       tsk->mm = NULL;
+       up_read(&mm->mmap_sem);
+       enter_lazy_tlb(mm, current);
+-      /* We don't want this task to be frozen prematurely */
+-      clear_freeze_flag(tsk);
+       if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+               atomic_dec(&mm->oom_disable_count);
+       task_unlock(tsk);
+@@ -1051,6 +1049,7 @@ NORET_TYPE void do_exit(long code)
+       exit_rcu();
+       /* causes final put_task_struct in finish_task_switch(). */
+       tsk->state = TASK_DEAD;
++      tsk->flags |= PF_NOFREEZE;      /* tell freezer to ignore us */
+       schedule();
+       BUG();
+       /* Avoid "noreturn function does return".  */
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index fe27872..23822dc 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -25,8 +25,7 @@
+ static inline int freezable(struct task_struct * p)
+ {
+       if ((p == current) ||
+-          (p->flags & PF_NOFREEZE) ||
+-          (p->exit_state != 0))
++          (p->flags & PF_NOFREEZE))
+               return 0;
+       return 1;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0118-freezer-don-t-distinguish-nosig-tasks-on-thaw.patch b/patches.runtime_pm/0118-freezer-don-t-distinguish-nosig-tasks-on-thaw.patch
new file mode 100644 (file)
index 0000000..56c84fb
--- /dev/null
@@ -0,0 +1,68 @@
+From c5c4b7c42e328228085d8f904bf411dcd58d6012 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:23 -0800
+Subject: freezer: don't distinguish nosig tasks on thaw
+
+There's no point in thawing nosig tasks before others.  There's no
+ordering requirement between the two groups on thaw, which the staged
+thawing can't guarantee anyway.  Simplify thaw_processes() by removing
+the distinction and collapsing thaw_tasks() into thaw_processes().
+This will help further updates to freezer.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+(cherry picked from commit 6cd8dedcdd8e8de01391a7cf25f0b2afeb24f8f4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/process.c |   20 +++++++-------------
+ 1 file changed, 7 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 23822dc..9db048f 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -170,34 +170,28 @@ int freeze_kernel_threads(void)
+       return error;
+ }
+-static void thaw_tasks(bool nosig_only)
++void thaw_processes(void)
+ {
+       struct task_struct *g, *p;
++      oom_killer_enable();
++
++      printk("Restarting tasks ... ");
++
++      thaw_workqueues();
++
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               if (!freezable(p))
+                       continue;
+-              if (nosig_only && should_send_signal(p))
+-                      continue;
+-
+               if (cgroup_freezing_or_frozen(p))
+                       continue;
+               __thaw_task(p);
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+-}
+-
+-void thaw_processes(void)
+-{
+-      oom_killer_enable();
+-      printk("Restarting tasks ... ");
+-      thaw_workqueues();
+-      thaw_tasks(true);
+-      thaw_tasks(false);
+       schedule();
+       printk("done.\n");
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0119-freezer-use-dedicated-lock-instead-of-task_lock-memo.patch b/patches.runtime_pm/0119-freezer-use-dedicated-lock-instead-of-task_lock-memo.patch
new file mode 100644 (file)
index 0000000..96191c3
--- /dev/null
@@ -0,0 +1,187 @@
+From 80d85d79b1307412028941cfaaa24a34fe7a3e4e Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:24 -0800
+Subject: freezer: use dedicated lock instead of task_lock() + memory barrier
+
+Freezer synchronization is needlessly complicated - it's by no means a
+hot path and the priority is staying unintrusive and safe.  This patch
+makes it simply use a dedicated lock instead of piggy-backing on
+task_lock() and playing with memory barriers.
+
+On the failure path of try_to_freeze_tasks(), locking is moved from it
+to cancel_freezing().  This makes the frozen() test racy but the race
+here is a non-issue as the warning is printed for tasks which failed
+to enter frozen for 20 seconds and race on PF_FROZEN at the last
+moment doesn't change anything.
+
+This simplifies freezer implementation and eases further changes
+including some race fixes.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+(cherry picked from commit 0c9af09262864a2744091ee94c98c4a8fd60c98b)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/freezer.c       |   84 +++++++++++++++++++++---------------------------
+ kernel/power/process.c |    2 --
+ 2 files changed, 37 insertions(+), 49 deletions(-)
+
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 8b88d04..4150835 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -11,17 +11,8 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+-/*
+- * freezing is complete, mark current process as frozen
+- */
+-static inline void frozen_process(void)
+-{
+-      if (!unlikely(current->flags & PF_NOFREEZE)) {
+-              current->flags |= PF_FROZEN;
+-              smp_wmb();
+-      }
+-      clear_freeze_flag(current);
+-}
++/* protects freezing and frozen transitions */
++static DEFINE_SPINLOCK(freezer_lock);
+ /* Refrigerator is place where frozen processes are stored :-). */
+ bool __refrigerator(bool check_kthr_stop)
+@@ -31,14 +22,16 @@ bool __refrigerator(bool check_kthr_stop)
+       bool was_frozen = false;
+       long save;
+-      task_lock(current);
+-      if (freezing(current)) {
+-              frozen_process();
+-              task_unlock(current);
+-      } else {
+-              task_unlock(current);
++      spin_lock_irq(&freezer_lock);
++      if (!freezing(current)) {
++              spin_unlock_irq(&freezer_lock);
+               return was_frozen;
+       }
++      if (!(current->flags & PF_NOFREEZE))
++              current->flags |= PF_FROZEN;
++      clear_freeze_flag(current);
++      spin_unlock_irq(&freezer_lock);
++
+       save = current->state;
+       pr_debug("%s entered refrigerator\n", current->comm);
+@@ -99,21 +92,18 @@ static void fake_signal_wake_up(struct task_struct *p)
+  */
+ bool freeze_task(struct task_struct *p, bool sig_only)
+ {
+-      /*
+-       * We first check if the task is freezing and next if it has already
+-       * been frozen to avoid the race with frozen_process() which first marks
+-       * the task as frozen and next clears its TIF_FREEZE.
+-       */
+-      if (!freezing(p)) {
+-              smp_rmb();
+-              if (frozen(p))
+-                      return false;
+-
+-              if (!sig_only || should_send_signal(p))
+-                      set_freeze_flag(p);
+-              else
+-                      return false;
+-      }
++      unsigned long flags;
++      bool ret = false;
++
++      spin_lock_irqsave(&freezer_lock, flags);
++
++      if (sig_only && !should_send_signal(p))
++              goto out_unlock;
++
++      if (frozen(p))
++              goto out_unlock;
++
++      set_freeze_flag(p);
+       if (should_send_signal(p)) {
+               fake_signal_wake_up(p);
+@@ -123,26 +113,28 @@ bool freeze_task(struct task_struct *p, bool sig_only)
+                * TASK_RUNNING transition can't race with task state
+                * testing in try_to_freeze_tasks().
+                */
+-      } else if (sig_only) {
+-              return false;
+       } else {
+               wake_up_state(p, TASK_INTERRUPTIBLE);
+       }
+-
+-      return true;
++      ret = true;
++out_unlock:
++      spin_unlock_irqrestore(&freezer_lock, flags);
++      return ret;
+ }
+ void cancel_freezing(struct task_struct *p)
+ {
+       unsigned long flags;
++      spin_lock_irqsave(&freezer_lock, flags);
+       if (freezing(p)) {
+               pr_debug("  clean up: %s\n", p->comm);
+               clear_freeze_flag(p);
+-              spin_lock_irqsave(&p->sighand->siglock, flags);
++              spin_lock(&p->sighand->siglock);
+               recalc_sigpending_and_wake(p);
+-              spin_unlock_irqrestore(&p->sighand->siglock, flags);
++              spin_unlock(&p->sighand->siglock);
+       }
++      spin_unlock_irqrestore(&freezer_lock, flags);
+ }
+ /*
+@@ -156,16 +148,14 @@ void cancel_freezing(struct task_struct *p)
+  */
+ void __thaw_task(struct task_struct *p)
+ {
+-      bool was_frozen;
++      unsigned long flags;
+-      task_lock(p);
+-      was_frozen = frozen(p);
+-      if (was_frozen)
++      spin_lock_irqsave(&freezer_lock, flags);
++      if (frozen(p)) {
+               p->flags &= ~PF_FROZEN;
+-      else
+-              clear_freeze_flag(p);
+-      task_unlock(p);
+-
+-      if (was_frozen)
+               wake_up_process(p);
++      } else {
++              clear_freeze_flag(p);
++      }
++      spin_unlock_irqrestore(&freezer_lock, flags);
+ }
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 9db048f..bd420ca 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -118,11 +118,9 @@ static int try_to_freeze_tasks(bool sig_only)
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+-                      task_lock(p);
+                       if (!wakeup && freezing(p) && !freezer_should_skip(p))
+                               sched_show_task(p);
+                       cancel_freezing(p);
+-                      task_unlock(p);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+       } else {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0120-freezer-make-freezing-indicate-freeze-condition-in-e.patch b/patches.runtime_pm/0120-freezer-make-freezing-indicate-freeze-condition-in-e.patch
new file mode 100644 (file)
index 0000000..a83342f
--- /dev/null
@@ -0,0 +1,162 @@
+From 3e983de3cc2c39c65f268cf3aedb76b591f50646 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:24 -0800
+Subject: freezer: make freezing indicate freeze condition in effect
+
+Currently freezing (TIF_FREEZE) and frozen (PF_FROZEN) states are
+interlocked - freezing is set to request freeze and when the task
+actually freezes, it clears freezing and sets frozen.
+
+This interlocking makes things more complex than necessary - freezing
+doesn't mean there's freezing condition in effect and frozen doesn't
+match the task actually entering and leaving frozen state (it's
+cleared by the thawing task).
+
+This patch makes freezing indicate that freeze condition is in effect.
+A task enters and stays frozen if freezing.  This makes PF_FROZEN
+manipulation done only by the task itself and prevents wakeup from
+__thaw_task() leaking outside of refrigerator.
+
+The only place which needs to tell freezing && !frozen is
+try_to_freeze_task() to whine about tasks which don't enter frozen.
+It's updated to test the condition explicitly.
+
+With the change, frozen() state my linger after __thaw_task() until
+the task wakes up and exits fridge.  This can trigger BUG_ON() in
+update_if_frozen().  Work it around by testing freezing() && frozen()
+instead of frozen().
+
+-v2: Oleg pointed out missing re-check of freezing() when trying to
+     clear FROZEN and possible spurious BUG_ON() trigger in
+     update_if_frozen().  Both fixed.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Paul Menage <paul@paulmenage.org>
+(cherry picked from commit 6907483b4e803a20f0b48cc9afa3817420ce61c5)
+
+Conflicts:
+
+       kernel/cgroup_freezer.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/cgroup_freezer.c |    2 +-
+ kernel/freezer.c        |   42 ++++++++++++++++++++++++------------------
+ kernel/power/process.c  |    3 ++-
+ 3 files changed, 27 insertions(+), 20 deletions(-)
+
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index bd03e22..d8f5e61 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -238,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
+       cgroup_iter_start(cgroup, &it);
+       while ((task = cgroup_iter_next(cgroup, &it))) {
+               ntotal++;
+-              if (is_task_frozen_enough(task))
++              if (freezing(task) && is_task_frozen_enough(task))
+                       nfrozen++;
+       }
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 4150835..e87f5d9 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -22,14 +22,19 @@ bool __refrigerator(bool check_kthr_stop)
+       bool was_frozen = false;
+       long save;
++      /*
++       * Enter FROZEN.  If NOFREEZE, schedule immediate thawing by
++       * clearing freezing.
++       */
+       spin_lock_irq(&freezer_lock);
++repeat:
+       if (!freezing(current)) {
+               spin_unlock_irq(&freezer_lock);
+               return was_frozen;
+       }
+-      if (!(current->flags & PF_NOFREEZE))
+-              current->flags |= PF_FROZEN;
+-      clear_freeze_flag(current);
++      if (current->flags & PF_NOFREEZE)
++              clear_freeze_flag(current);
++      current->flags |= PF_FROZEN;
+       spin_unlock_irq(&freezer_lock);
+       save = current->state;
+@@ -44,7 +49,7 @@ bool __refrigerator(bool check_kthr_stop)
+       for (;;) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+-              if (!frozen(current) ||
++              if (!freezing(current) ||
+                   (check_kthr_stop && kthread_should_stop()))
+                       break;
+               was_frozen = true;
+@@ -54,6 +59,13 @@ bool __refrigerator(bool check_kthr_stop)
+       /* Remove the accounting blocker */
+       current->flags &= ~PF_FREEZING;
++      /* leave FROZEN */
++      spin_lock_irq(&freezer_lock);
++      if (freezing(current))
++              goto repeat;
++      current->flags &= ~PF_FROZEN;
++      spin_unlock_irq(&freezer_lock);
++
+       pr_debug("%s left refrigerator\n", current->comm);
+       /*
+@@ -137,25 +149,19 @@ void cancel_freezing(struct task_struct *p)
+       spin_unlock_irqrestore(&freezer_lock, flags);
+ }
+-/*
+- * Wake up a frozen task
+- *
+- * task_lock() is needed to prevent the race with refrigerator() which may
+- * occur if the freezing of tasks fails.  Namely, without the lock, if the
+- * freezing of tasks failed, thaw_tasks() might have run before a task in
+- * refrigerator() could call frozen_process(), in which case the task would be
+- * frozen and no one would thaw it.
+- */
+ void __thaw_task(struct task_struct *p)
+ {
+       unsigned long flags;
++      /*
++       * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
++       * be visible to @p as waking up implies wmb.  Waking up inside
++       * freezer_lock also prevents wakeups from leaking outside
++       * refrigerator.
++       */
+       spin_lock_irqsave(&freezer_lock, flags);
+-      if (frozen(p)) {
+-              p->flags &= ~PF_FROZEN;
++      clear_freeze_flag(p);
++      if (frozen(p))
+               wake_up_process(p);
+-      } else {
+-              clear_freeze_flag(p);
+-      }
+       spin_unlock_irqrestore(&freezer_lock, flags);
+ }
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index bd420ca..e6e2739 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -118,7 +118,8 @@ static int try_to_freeze_tasks(bool sig_only)
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+-                      if (!wakeup && freezing(p) && !freezer_should_skip(p))
++                      if (!wakeup && !freezer_should_skip(p) &&
++                          freezing(p) && !frozen(p))
+                               sched_show_task(p);
+                       cancel_freezing(p);
+               } while_each_thread(g, p);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0121-freezer-test-freezable-conditions-while-holding-free.patch b/patches.runtime_pm/0121-freezer-test-freezable-conditions-while-holding-free.patch
new file mode 100644 (file)
index 0000000..e845ceb
--- /dev/null
@@ -0,0 +1,87 @@
+From f43f0c2b296e94d5a3f6690f4799d3a83008a6c0 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:24 -0800
+Subject: freezer: test freezable conditions while holding freezer_lock
+
+try_to_freeze_tasks() and thaw_processes() use freezable() and
+frozen() as preliminary tests before initiating operations on a task.
+These are done without any synchronization and hinder with
+synchronization cleanup without any real performance benefits.
+
+In try_to_freeze_tasks(), open code self test and move PF_NOFREEZE and
+frozen() tests inside freezer_lock in freeze_task().
+
+thaw_processes() can simply drop freezable() test as frozen() test in
+__thaw_task() is enough.
+
+Note: This used to be a part of larger patch to fix set_freezable()
+      race.  Separated out to satisfy ordering among dependent fixes.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+(cherry picked from commit 85f1d476653f52c97ca75466b2494e67c1cbd25d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/freezer.c       |    3 ++-
+ kernel/power/process.c |   16 +---------------
+ 2 files changed, 3 insertions(+), 16 deletions(-)
+
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index e87f5d9..a26f0d2 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -109,7 +109,8 @@ bool freeze_task(struct task_struct *p, bool sig_only)
+       spin_lock_irqsave(&freezer_lock, flags);
+-      if (sig_only && !should_send_signal(p))
++      if ((p->flags & PF_NOFREEZE) ||
++          (sig_only && !should_send_signal(p)))
+               goto out_unlock;
+       if (frozen(p))
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index e6e2739..e59676f 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -22,14 +22,6 @@
+  */
+ #define TIMEOUT       (20 * HZ)
+-static inline int freezable(struct task_struct * p)
+-{
+-      if ((p == current) ||
+-          (p->flags & PF_NOFREEZE))
+-              return 0;
+-      return 1;
+-}
+-
+ static int try_to_freeze_tasks(bool sig_only)
+ {
+       struct task_struct *g, *p;
+@@ -52,10 +44,7 @@ static int try_to_freeze_tasks(bool sig_only)
+               todo = 0;
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+-                      if (frozen(p) || !freezable(p))
+-                              continue;
+-
+-                      if (!freeze_task(p, sig_only))
++                      if (p == current || !freeze_task(p, sig_only))
+                               continue;
+                       /*
+@@ -181,9 +170,6 @@ void thaw_processes(void)
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+-              if (!freezable(p))
+-                      continue;
+-
+               if (cgroup_freezing_or_frozen(p))
+                       continue;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0122-freezer-clean-up-freeze_processes-failure-path.patch b/patches.runtime_pm/0122-freezer-clean-up-freeze_processes-failure-path.patch
new file mode 100644 (file)
index 0000000..b393580
--- /dev/null
@@ -0,0 +1,239 @@
+From 72b77c1e8484860b85fb8add50bbd9ddfceb5174 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:24 -0800
+Subject: freezer: clean up freeze_processes() failure path
+
+freeze_processes() failure path is rather messy.  Freezing is canceled
+for workqueues and tasks which aren't frozen yet but frozen tasks are
+left alone and should be thawed by the caller and of course some
+callers (xen and kexec) didn't do it.
+
+This patch updates __thaw_task() to handle cancelation correctly and
+makes freeze_processes() and freeze_kernel_threads() call
+thaw_processes() on failure instead so that the system is fully thawed
+on failure.  Unnecessary [suspend_]thaw_processes() calls are removed
+from kernel/power/hibernate.c, suspend.c and user.c.
+
+While at it, restructure error checking if clause in suspend_prepare()
+to be less weird.
+
+-v2: Srivatsa spotted missing removal of suspend_thaw_processes() in
+     suspend_prepare() and error in commit message.  Updated.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit 03afed8bc296fa70186ba832c1126228bb992465)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/freezer.h  |    1 -
+ kernel/freezer.c         |   25 +++++++++----------------
+ kernel/power/hibernate.c |   15 ++-------------
+ kernel/power/process.c   |   16 ++++++++--------
+ kernel/power/suspend.c   |    8 +++-----
+ kernel/power/user.c      |    4 +---
+ 6 files changed, 23 insertions(+), 46 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 0d161c5..22f1257 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -61,7 +61,6 @@ static inline bool try_to_freeze(void)
+ }
+ extern bool freeze_task(struct task_struct *p, bool sig_only);
+-extern void cancel_freezing(struct task_struct *p);
+ #ifdef CONFIG_CGROUP_FREEZER
+ extern int cgroup_freezing_or_frozen(struct task_struct *task);
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index a26f0d2..96f9667 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -135,21 +135,6 @@ out_unlock:
+       return ret;
+ }
+-void cancel_freezing(struct task_struct *p)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&freezer_lock, flags);
+-      if (freezing(p)) {
+-              pr_debug("  clean up: %s\n", p->comm);
+-              clear_freeze_flag(p);
+-              spin_lock(&p->sighand->siglock);
+-              recalc_sigpending_and_wake(p);
+-              spin_unlock(&p->sighand->siglock);
+-      }
+-      spin_unlock_irqrestore(&freezer_lock, flags);
+-}
+-
+ void __thaw_task(struct task_struct *p)
+ {
+       unsigned long flags;
+@@ -159,10 +144,18 @@ void __thaw_task(struct task_struct *p)
+        * be visible to @p as waking up implies wmb.  Waking up inside
+        * freezer_lock also prevents wakeups from leaking outside
+        * refrigerator.
++       *
++       * If !FROZEN, @p hasn't reached refrigerator, recalc sigpending to
++       * avoid leaving dangling TIF_SIGPENDING behind.
+        */
+       spin_lock_irqsave(&freezer_lock, flags);
+       clear_freeze_flag(p);
+-      if (frozen(p))
++      if (frozen(p)) {
+               wake_up_process(p);
++      } else {
++              spin_lock(&p->sighand->siglock);
++              recalc_sigpending_and_wake(p);
++              spin_unlock(&p->sighand->siglock);
++      }
+       spin_unlock_irqrestore(&freezer_lock, flags);
+ }
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 62c887e..4d8f280 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -607,17 +607,6 @@ static void power_down(void)
+       while(1);
+ }
+-static int prepare_processes(void)
+-{
+-      int error = 0;
+-
+-      if (freeze_processes()) {
+-              error = -EBUSY;
+-              thaw_processes();
+-      }
+-      return error;
+-}
+-
+ /**
+  * hibernate - Carry out system hibernation, including saving the image.
+  */
+@@ -650,7 +639,7 @@ int hibernate(void)
+       sys_sync();
+       printk("done.\n");
+-      error = prepare_processes();
++      error = freeze_processes();
+       if (error)
+               goto Free_bitmaps;
+@@ -812,7 +801,7 @@ static int software_resume(void)
+               goto close_finish;
+       pr_debug("PM: Preparing processes for restore.\n");
+-      error = prepare_processes();
++      error = freeze_processes();
+       if (error) {
+               swsusp_close(FMODE_READ);
+               goto Done;
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index e59676f..ce64383 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -91,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
+       elapsed_csecs = elapsed_csecs64;
+       if (todo) {
+-              /* This does not unfreeze processes that are already frozen
+-               * (we have slightly ugly calling convention in that respect,
+-               * and caller must call thaw_processes() if something fails),
+-               * but it cleans up leftover PF_FREEZE requests.
+-               */
+               printk("\n");
+               printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
+                      "(%d tasks refusing to freeze, wq_busy=%d):\n",
+@@ -103,14 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
+                      elapsed_csecs / 100, elapsed_csecs % 100,
+                      todo - wq_busy, wq_busy);
+-              thaw_workqueues();
+-
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+                       if (!wakeup && !freezer_should_skip(p) &&
+                           freezing(p) && !frozen(p))
+                               sched_show_task(p);
+-                      cancel_freezing(p);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+       } else {
+@@ -123,6 +115,8 @@ static int try_to_freeze_tasks(bool sig_only)
+ /**
+  * freeze_processes - Signal user space processes to enter the refrigerator.
++ *
++ * On success, returns 0.  On failure, -errno and system is fully thawed.
+  */
+ int freeze_processes(void)
+ {
+@@ -137,11 +131,15 @@ int freeze_processes(void)
+       printk("\n");
+       BUG_ON(in_atomic());
++      if (error)
++              thaw_processes();
+       return error;
+ }
+ /**
+  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
++ *
++ * On success, returns 0.  On failure, -errno and system is fully thawed.
+  */
+ int freeze_kernel_threads(void)
+ {
+@@ -155,6 +153,8 @@ int freeze_kernel_threads(void)
+       printk("\n");
+       BUG_ON(in_atomic());
++      if (error)
++              thaw_processes();
+       return error;
+ }
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 4953dc0..d336b27 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -106,13 +106,11 @@ static int suspend_prepare(void)
+               goto Finish;
+       error = suspend_freeze_processes();
+-      if (error) {
+-              suspend_stats.failed_freeze++;
+-              dpm_save_failed_step(SUSPEND_FREEZE);
+-      } else
++      if (!error)
+               return 0;
+-      suspend_thaw_processes();
++      suspend_stats.failed_freeze++;
++      dpm_save_failed_step(SUSPEND_FREEZE);
+       usermodehelper_enable();
+  Finish:
+       pm_notifier_call_chain(PM_POST_SUSPEND);
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 6d8f535..7cc3f5b 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -257,10 +257,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+                       break;
+               error = freeze_processes();
+-              if (error) {
+-                      thaw_processes();
++              if (error)
+                       usermodehelper_enable();
+-              }
+               if (!error)
+                       data->frozen = 1;
+               break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0123-cgroup_freezer-prepare-for-removal-of-TIF_FREEZE.patch b/patches.runtime_pm/0123-cgroup_freezer-prepare-for-removal-of-TIF_FREEZE.patch
new file mode 100644 (file)
index 0000000..f0092e7
--- /dev/null
@@ -0,0 +1,178 @@
+From ea4b5557ce542c3d132b0dda4180477b7e1ad204 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:25 -0800
+Subject: cgroup_freezer: prepare for removal of TIF_FREEZE
+
+TIF_FREEZE will be removed soon and freezing() will directly test
+whether any freezing condition is in effect.  Make the following
+changes in preparation.
+
+* Rename cgroup_freezing_or_frozen() to cgroup_freezing() and make it
+  return bool.
+
+* Make cgroup_freezing() access task_freezer() under rcu read lock
+  instead of task_lock().  This makes the state dereferencing racy
+  against task moving to another cgroup; however, it was already racy
+  without this change as ->state dereference wasn't synchronized.
+  This will be later dealt with using attach hooks.
+
+* freezer->state is now set before trying to push tasks into the
+  target state.
+
+-v2: Oleg pointed out that freeze_change_state() was setting
+     freeze->state incorrectly to CGROUP_FROZEN instead of
+     CGROUP_FREEZING.  Fixed.
+
+-v3: Matt pointed out that setting CGROUP_FROZEN used to always invoke
+     try_to_freeze_cgroup() regardless of the current state.  Patch
+     updated such that the actual freeze/thaw operations are always
+     performed on invocation.  This shouldn't make any difference
+     unless something is broken.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Paul Menage <paul@paulmenage.org>
+Cc: Li Zefan <lizf@cn.fujitsu.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+(cherry picked from commit 22b4e111fa01a1147aa562ceaf18a752a928ef4e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/freezer.h |    6 +++---
+ kernel/cgroup_freezer.c |   40 +++++++++++++---------------------------
+ kernel/power/process.c  |    2 +-
+ 3 files changed, 17 insertions(+), 31 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 22f1257..08a63a7 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -63,11 +63,11 @@ static inline bool try_to_freeze(void)
+ extern bool freeze_task(struct task_struct *p, bool sig_only);
+ #ifdef CONFIG_CGROUP_FREEZER
+-extern int cgroup_freezing_or_frozen(struct task_struct *task);
++extern bool cgroup_freezing(struct task_struct *task);
+ #else /* !CONFIG_CGROUP_FREEZER */
+-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
++static inline bool cgroup_freezing(struct task_struct *task)
+ {
+-      return 0;
++      return false;
+ }
+ #endif /* !CONFIG_CGROUP_FREEZER */
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index d8f5e61..61ef5b5 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
+                           struct freezer, css);
+ }
+-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
++bool cgroup_freezing(struct task_struct *task)
+ {
+-      enum freezer_state state = task_freezer(task)->state;
+-      return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+-}
++      enum freezer_state state;
++      bool ret;
+-int cgroup_freezing_or_frozen(struct task_struct *task)
+-{
+-      int result;
+-      task_lock(task);
+-      result = __cgroup_freezing_or_frozen(task);
+-      task_unlock(task);
+-      return result;
++      rcu_read_lock();
++      state = task_freezer(task)->state;
++      ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
++      rcu_read_unlock();
++
++      return ret;
+ }
+ /*
+@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
+  * freezer_can_attach():
+  * cgroup_mutex (held by caller of can_attach)
+  *
+- * cgroup_freezing_or_frozen():
+- * task->alloc_lock (to get task's cgroup)
+- *
+  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
+  * freezer->lock
+  *  sighand->siglock (if the cgroup is freezing)
+@@ -184,13 +179,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
+ static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+ {
+-      rcu_read_lock();
+-      if (__cgroup_freezing_or_frozen(tsk)) {
+-              rcu_read_unlock();
+-              return -EBUSY;
+-      }
+-      rcu_read_unlock();
+-      return 0;
++      return cgroup_freezing(tsk) ? -EBUSY : 0;
+ }
+ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+@@ -286,7 +275,6 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+       struct task_struct *task;
+       unsigned int num_cant_freeze_now = 0;
+-      freezer->state = CGROUP_FREEZING;
+       cgroup_iter_start(cgroup, &it);
+       while ((task = cgroup_iter_next(cgroup, &it))) {
+               if (!freeze_task(task, true))
+@@ -310,8 +298,6 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+       while ((task = cgroup_iter_next(cgroup, &it)))
+               __thaw_task(task);
+       cgroup_iter_end(cgroup, &it);
+-
+-      freezer->state = CGROUP_THAWED;
+ }
+ static int freezer_change_state(struct cgroup *cgroup,
+@@ -325,20 +311,20 @@ static int freezer_change_state(struct cgroup *cgroup,
+       spin_lock_irq(&freezer->lock);
+       update_if_frozen(cgroup, freezer);
+-      if (goal_state == freezer->state)
+-              goto out;
+       switch (goal_state) {
+       case CGROUP_THAWED:
++              freezer->state = CGROUP_THAWED;
+               unfreeze_cgroup(cgroup, freezer);
+               break;
+       case CGROUP_FROZEN:
++              freezer->state = CGROUP_FREEZING;
+               retval = try_to_freeze_cgroup(cgroup, freezer);
+               break;
+       default:
+               BUG();
+       }
+-out:
++
+       spin_unlock_irq(&freezer->lock);
+       return retval;
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index ce64383..9f6f5c7 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -170,7 +170,7 @@ void thaw_processes(void)
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+-              if (cgroup_freezing_or_frozen(p))
++              if (cgroup_freezing(p))
+                       continue;
+               __thaw_task(p);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0124-freezer-make-freezing-test-freeze-conditions-in-effe.patch b/patches.runtime_pm/0124-freezer-make-freezing-test-freeze-conditions-in-effe.patch
new file mode 100644 (file)
index 0000000..918d265
--- /dev/null
@@ -0,0 +1,329 @@
+From eb9b27f500cf1bc56a4f10a7d7d3112635305dc7 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:25 -0800
+Subject: freezer: make freezing() test freeze conditions in effect instead of
+ TIF_FREEZE
+
+Using TIF_FREEZE for freezing worked when there was only single
+freezing condition (the PM one); however, now there is also the
+cgroup_freezer and single bit flag is getting clumsy.
+thaw_processes() is already testing whether cgroup freezing in in
+effect to avoid thawing tasks which were frozen by both PM and cgroup
+freezers.
+
+This is racy (nothing prevents race against cgroup freezing) and
+fragile.  A much simpler way is to test actual freeze conditions from
+freezing() - ie. directly test whether PM or cgroup freezing is in
+effect.
+
+This patch adds variables to indicate whether and what type of
+freezing conditions are in effect and reimplements freezing() such
+that it directly tests whether any of the two freezing conditions is
+active and the task should freeze.  On fast path, freezing() is still
+very cheap - it only tests system_freezing_cnt.
+
+This makes the clumsy dancing aroung TIF_FREEZE unnecessary and
+freeze/thaw operations more usual - updating state variables for the
+new state and nudging target tasks so that they notice the new state
+and comply.  As long as the nudging happens after state update, it's
+race-free.
+
+* This allows use of freezing() in freeze_task().  Replace the open
+  coded tests with freezing().
+
+* p != current test is added to warning printing conditions in
+  try_to_freeze_tasks() failure path.  This is necessary as freezing()
+  is now true for the task which initiated freezing too.
+
+-v2: Oleg pointed out that re-freezing FROZEN cgroup could increment
+     system_freezing_cnt.  Fixed.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Paul Menage <paul@paulmenage.org>  (for the cgroup portions)
+(cherry picked from commit a3201227f803ad7fd43180c5195dbe5a2bf998aa)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/freezer.h |   33 +++++++++----------------
+ kernel/cgroup_freezer.c |   10 +++++++-
+ kernel/fork.c           |    1 -
+ kernel/freezer.c        |   62 ++++++++++++++++++++++++++++++-----------------
+ kernel/power/process.c  |   15 +++++++++---
+ 5 files changed, 72 insertions(+), 49 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 08a63a7..e01df77 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -5,8 +5,13 @@
+ #include <linux/sched.h>
+ #include <linux/wait.h>
++#include <linux/atomic.h>
+ #ifdef CONFIG_FREEZER
++extern atomic_t system_freezing_cnt;  /* nr of freezing conds in effect */
++extern bool pm_freezing;              /* PM freezing in effect */
++extern bool pm_nosig_freezing;                /* PM nosig freezing in effect */
++
+ /*
+  * Check if a process has been frozen
+  */
+@@ -15,28 +20,16 @@ static inline int frozen(struct task_struct *p)
+       return p->flags & PF_FROZEN;
+ }
+-/*
+- * Check if there is a request to freeze a process
+- */
+-static inline int freezing(struct task_struct *p)
+-{
+-      return test_tsk_thread_flag(p, TIF_FREEZE);
+-}
++extern bool freezing_slow_path(struct task_struct *p);
+ /*
+- * Request that a process be frozen
+- */
+-static inline void set_freeze_flag(struct task_struct *p)
+-{
+-      set_tsk_thread_flag(p, TIF_FREEZE);
+-}
+-
+-/*
+- * Sometimes we may need to cancel the previous 'freeze' request
++ * Check if there is a request to freeze a process
+  */
+-static inline void clear_freeze_flag(struct task_struct *p)
++static inline bool freezing(struct task_struct *p)
+ {
+-      clear_tsk_thread_flag(p, TIF_FREEZE);
++      if (likely(!atomic_read(&system_freezing_cnt)))
++              return false;
++      return freezing_slow_path(p);
+ }
+ static inline bool should_send_signal(struct task_struct *p)
+@@ -164,9 +157,7 @@ static inline void set_freezable_with_signal(void)
+ })
+ #else /* !CONFIG_FREEZER */
+ static inline int frozen(struct task_struct *p) { return 0; }
+-static inline int freezing(struct task_struct *p) { return 0; }
+-static inline void set_freeze_flag(struct task_struct *p) {}
+-static inline void clear_freeze_flag(struct task_struct *p) {}
++static inline bool freezing(struct task_struct *p) { return false; }
+ static inline bool __refrigerator(bool check_kthr_stop) { return false; }
+ static inline int freeze_processes(void) { return -ENOSYS; }
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index 61ef5b5..ab79783 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -145,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
+ static void freezer_destroy(struct cgroup_subsys *ss,
+                           struct cgroup *cgroup)
+ {
+-      kfree(cgroup_freezer(cgroup));
++      struct freezer *freezer = cgroup_freezer(cgroup);
++
++      if (freezer->state != CGROUP_THAWED)
++              atomic_dec(&system_freezing_cnt);
++      kfree(freezer);
+ }
+ /* task is frozen or will freeze immediately when next it gets woken */
+@@ -314,10 +318,14 @@ static int freezer_change_state(struct cgroup *cgroup,
+       switch (goal_state) {
+       case CGROUP_THAWED:
++              if (freezer->state != CGROUP_THAWED)
++                      atomic_dec(&system_freezing_cnt);
+               freezer->state = CGROUP_THAWED;
+               unfreeze_cgroup(cgroup, freezer);
+               break;
+       case CGROUP_FROZEN:
++              if (freezer->state == CGROUP_THAWED)
++                      atomic_inc(&system_freezing_cnt);
+               freezer->state = CGROUP_FREEZING;
+               retval = try_to_freeze_cgroup(cgroup, freezer);
+               break;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index c66f27f..35799b7 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1002,7 +1002,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
+       new_flags |= PF_FORKNOEXEC;
+       new_flags |= PF_STARTING;
+       p->flags = new_flags;
+-      clear_freeze_flag(p);
+ }
+ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 96f9667..8bf25f4 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -11,9 +11,41 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
++/* total number of freezing conditions in effect */
++atomic_t system_freezing_cnt = ATOMIC_INIT(0);
++EXPORT_SYMBOL(system_freezing_cnt);
++
++/* indicate whether PM freezing is in effect, protected by pm_mutex */
++bool pm_freezing;
++bool pm_nosig_freezing;
++
+ /* protects freezing and frozen transitions */
+ static DEFINE_SPINLOCK(freezer_lock);
++/**
++ * freezing_slow_path - slow path for testing whether a task needs to be frozen
++ * @p: task to be tested
++ *
++ * This function is called by freezing() if system_freezing_cnt isn't zero
++ * and tests whether @p needs to enter and stay in frozen state.  Can be
++ * called under any context.  The freezers are responsible for ensuring the
++ * target tasks see the updated state.
++ */
++bool freezing_slow_path(struct task_struct *p)
++{
++      if (p->flags & PF_NOFREEZE)
++              return false;
++
++      if (pm_nosig_freezing || cgroup_freezing(p))
++              return true;
++
++      if (pm_freezing && !(p->flags & PF_FREEZER_NOSIG))
++              return true;
++
++      return false;
++}
++EXPORT_SYMBOL(freezing_slow_path);
++
+ /* Refrigerator is place where frozen processes are stored :-). */
+ bool __refrigerator(bool check_kthr_stop)
+ {
+@@ -23,17 +55,11 @@ bool __refrigerator(bool check_kthr_stop)
+       long save;
+       /*
+-       * Enter FROZEN.  If NOFREEZE, schedule immediate thawing by
+-       * clearing freezing.
++       * No point in checking freezing() again - the caller already did.
++       * Proceed to enter FROZEN.
+        */
+       spin_lock_irq(&freezer_lock);
+ repeat:
+-      if (!freezing(current)) {
+-              spin_unlock_irq(&freezer_lock);
+-              return was_frozen;
+-      }
+-      if (current->flags & PF_NOFREEZE)
+-              clear_freeze_flag(current);
+       current->flags |= PF_FROZEN;
+       spin_unlock_irq(&freezer_lock);
+@@ -105,18 +131,12 @@ static void fake_signal_wake_up(struct task_struct *p)
+ bool freeze_task(struct task_struct *p, bool sig_only)
+ {
+       unsigned long flags;
+-      bool ret = false;
+       spin_lock_irqsave(&freezer_lock, flags);
+-
+-      if ((p->flags & PF_NOFREEZE) ||
+-          (sig_only && !should_send_signal(p)))
+-              goto out_unlock;
+-
+-      if (frozen(p))
+-              goto out_unlock;
+-
+-      set_freeze_flag(p);
++      if (!freezing(p) || frozen(p)) {
++              spin_unlock_irqrestore(&freezer_lock, flags);
++              return false;
++      }
+       if (should_send_signal(p)) {
+               fake_signal_wake_up(p);
+@@ -129,10 +149,9 @@ bool freeze_task(struct task_struct *p, bool sig_only)
+       } else {
+               wake_up_state(p, TASK_INTERRUPTIBLE);
+       }
+-      ret = true;
+-out_unlock:
++
+       spin_unlock_irqrestore(&freezer_lock, flags);
+-      return ret;
++      return true;
+ }
+ void __thaw_task(struct task_struct *p)
+@@ -149,7 +168,6 @@ void __thaw_task(struct task_struct *p)
+        * avoid leaving dangling TIF_SIGPENDING behind.
+        */
+       spin_lock_irqsave(&freezer_lock, flags);
+-      clear_freeze_flag(p);
+       if (frozen(p)) {
+               wake_up_process(p);
+       } else {
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 9f6f5c7..0beb51e 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -101,7 +101,7 @@ static int try_to_freeze_tasks(bool sig_only)
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+                       if (!wakeup && !freezer_should_skip(p) &&
+-                          freezing(p) && !frozen(p))
++                          p != current && freezing(p) && !frozen(p))
+                               sched_show_task(p);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+@@ -122,7 +122,11 @@ int freeze_processes(void)
+ {
+       int error;
++      if (!pm_freezing)
++              atomic_inc(&system_freezing_cnt);
++
+       printk("Freezing user space processes ... ");
++      pm_freezing = true;
+       error = try_to_freeze_tasks(true);
+       if (!error) {
+               printk("done.");
+@@ -146,6 +150,7 @@ int freeze_kernel_threads(void)
+       int error;
+       printk("Freezing remaining freezable tasks ... ");
++      pm_nosig_freezing = true;
+       error = try_to_freeze_tasks(false);
+       if (!error)
+               printk("done.");
+@@ -162,6 +167,11 @@ void thaw_processes(void)
+ {
+       struct task_struct *g, *p;
++      if (pm_freezing)
++              atomic_dec(&system_freezing_cnt);
++      pm_freezing = false;
++      pm_nosig_freezing = false;
++
+       oom_killer_enable();
+       printk("Restarting tasks ... ");
+@@ -170,9 +180,6 @@ void thaw_processes(void)
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+-              if (cgroup_freezing(p))
+-                      continue;
+-
+               __thaw_task(p);
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0125-Freezer-fix-more-fallout-from-the-thaw_process-renam.patch b/patches.runtime_pm/0125-Freezer-fix-more-fallout-from-the-thaw_process-renam.patch
new file mode 100644 (file)
index 0000000..db0fcb5
--- /dev/null
@@ -0,0 +1,30 @@
+From d55bc830eecb234dfbf881ea1735b43f0950e91f Mon Sep 17 00:00:00 2001
+From: Stephen Rothwell <sfr@canb.auug.org.au>
+Date: Fri, 25 Nov 2011 00:44:55 +0100
+Subject: Freezer: fix more fallout from the thaw_process rename
+
+Commit 944e192db53c "freezer: rename thaw_process() to __thaw_task()
+and simplify the implementation" did not create a !CONFIG_FREEZER version
+of __thaw_task().
+
+Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+---
+ include/linux/freezer.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index e01df77..083ffef 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -158,6 +158,7 @@ static inline void set_freezable_with_signal(void)
+ #else /* !CONFIG_FREEZER */
+ static inline int frozen(struct task_struct *p) { return 0; }
+ static inline bool freezing(struct task_struct *p) { return false; }
++static inline void __thaw_task(struct task_struct *t) {}
+ static inline bool __refrigerator(bool check_kthr_stop) { return false; }
+ static inline int freeze_processes(void) { return -ENOSYS; }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0126-freezer-remove-unused-sig_only-from-freeze_task.patch b/patches.runtime_pm/0126-freezer-remove-unused-sig_only-from-freeze_task.patch
new file mode 100644 (file)
index 0000000..57e3c54
--- /dev/null
@@ -0,0 +1,142 @@
+From ee111bd31cf0e177f3ff57858cfa38fa5434ba6a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2011 12:32:26 -0800
+Subject: freezer: remove unused @sig_only from freeze_task()
+
+After "freezer: make freezing() test freeze conditions in effect
+instead of TIF_FREEZE", freezing() returns authoritative answer on
+whether the current task should freeze or not and freeze_task()
+doesn't need or use @sig_only.  Remove it.
+
+While at it, rewrite function comment for freeze_task() and rename
+@sig_only to @user_only in try_to_freeze_tasks().
+
+This patch doesn't cause any functional change.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+(cherry picked from commit 839e3407d90a810318d17c17ceb3d5928a910704)
+
+Conflicts:
+
+       include/linux/freezer.h
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/freezer.h |    2 +-
+ kernel/cgroup_freezer.c |    4 ++--
+ kernel/freezer.c        |   21 +++++++++------------
+ kernel/power/process.c  |    8 ++++----
+ 4 files changed, 16 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 083ffef..b79db3d 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -53,7 +53,7 @@ static inline bool try_to_freeze(void)
+       return __refrigerator(false);
+ }
+-extern bool freeze_task(struct task_struct *p, bool sig_only);
++extern bool freeze_task(struct task_struct *p);
+ #ifdef CONFIG_CGROUP_FREEZER
+ extern bool cgroup_freezing(struct task_struct *task);
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index ab79783..6d85d96 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -213,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+       /* Locking avoids race with FREEZING -> THAWED transitions. */
+       if (freezer->state == CGROUP_FREEZING)
+-              freeze_task(task, true);
++              freeze_task(task);
+       spin_unlock_irq(&freezer->lock);
+ }
+@@ -281,7 +281,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+       cgroup_iter_start(cgroup, &it);
+       while ((task = cgroup_iter_next(cgroup, &it))) {
+-              if (!freeze_task(task, true))
++              if (!freeze_task(task))
+                       continue;
+               if (is_task_frozen_enough(task))
+                       continue;
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 8bf25f4..9adeebe 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -115,20 +115,17 @@ static void fake_signal_wake_up(struct task_struct *p)
+ }
+ /**
+- *    freeze_task - send a freeze request to given task
+- *    @p: task to send the request to
+- *    @sig_only: if set, the request will only be sent if the task has the
+- *            PF_FREEZER_NOSIG flag unset
+- *    Return value: 'false', if @sig_only is set and the task has
+- *            PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
++ * freeze_task - send a freeze request to given task
++ * @p: task to send the request to
+  *
+- *    The freeze request is sent by setting the tasks's TIF_FREEZE flag and
+- *    either sending a fake signal to it or waking it up, depending on whether
+- *    or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
+- *    has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
+- *    TIF_FREEZE flag will not be set.
++ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
++ * flag and either sending a fake signal to it or waking it up, depending
++ * on whether it has %PF_FREEZER_NOSIG set.
++ *
++ * RETURNS:
++ * %false, if @p is not freezing or already frozen; %true, otherwise
+  */
+-bool freeze_task(struct task_struct *p, bool sig_only)
++bool freeze_task(struct task_struct *p)
+ {
+       unsigned long flags;
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 0beb51e..77274c9 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -22,7 +22,7 @@
+  */
+ #define TIMEOUT       (20 * HZ)
+-static int try_to_freeze_tasks(bool sig_only)
++static int try_to_freeze_tasks(bool user_only)
+ {
+       struct task_struct *g, *p;
+       unsigned long end_time;
+@@ -37,14 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
+       end_time = jiffies + TIMEOUT;
+-      if (!sig_only)
++      if (!user_only)
+               freeze_workqueues_begin();
+       while (true) {
+               todo = 0;
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+-                      if (p == current || !freeze_task(p, sig_only))
++                      if (p == current || !freeze_task(p))
+                               continue;
+                       /*
+@@ -65,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+-              if (!sig_only) {
++              if (!user_only) {
+                       wq_busy = freeze_workqueues_busy();
+                       todo += wq_busy;
+               }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0127-PM-Hibernate-Do-not-leak-memory-in-error-test-code-p.patch b/patches.runtime_pm/0127-PM-Hibernate-Do-not-leak-memory-in-error-test-code-p.patch
new file mode 100644 (file)
index 0000000..f17e087
--- /dev/null
@@ -0,0 +1,75 @@
+From 8236c20dc38b0234a640c9898f313723dd867f30 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 22 Nov 2011 23:08:10 +0100
+Subject: PM / Hibernate: Do not leak memory in error/test code paths
+
+The hibernation core code forgets to release memory preallocated
+for hibernation if there's an error in its early stages or if test
+modes causing hibernation_snapshot() to return early are used.  This
+causes the system to be hardly usable, because the amount of
+preallocated memory is usually huge.  Fix this problem.
+
+Reported-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit bb58dd5d1ffad6c2d21c69698ba766dad4ae54e6)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 4d8f280..863c14d 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -347,7 +347,7 @@ int hibernation_snapshot(int platform_mode)
+       error = freeze_kernel_threads();
+       if (error)
+-              goto Close;
++              goto Cleanup;
+       if (hibernation_test(TEST_FREEZER) ||
+               hibernation_testmode(HIBERNATION_TESTPROC)) {
+@@ -357,12 +357,14 @@ int hibernation_snapshot(int platform_mode)
+                * successful freezer test.
+                */
+               freezer_test_done = true;
+-              goto Close;
++              goto Cleanup;
+       }
+       error = dpm_prepare(PMSG_FREEZE);
+-      if (error)
+-              goto Complete_devices;
++      if (error) {
++              dpm_complete(msg);
++              goto Cleanup;
++      }
+       suspend_console();
+       pm_restrict_gfp_mask();
+@@ -391,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
+               pm_restore_gfp_mask();
+       resume_console();
+-
+- Complete_devices:
+       dpm_complete(msg);
+  Close:
+@@ -402,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
+  Recover_platform:
+       platform_recover(platform_mode);
+       goto Resume_devices;
++
++ Cleanup:
++      swsusp_free();
++      goto Close;
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0128-PM-Fix-indentation-and-remove-extraneous-whitespaces.patch b/patches.runtime_pm/0128-PM-Fix-indentation-and-remove-extraneous-whitespaces.patch
new file mode 100644 (file)
index 0000000..e8ea3f1
--- /dev/null
@@ -0,0 +1,55 @@
+From 0c9520d1359898b2836cc4360d320ac59b462ce3 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Sat, 19 Nov 2011 14:39:01 +0100
+Subject: PM: Fix indentation and remove extraneous whitespaces in
+ kernel/power/main.c
+
+Lack of proper indentation of the goto statement decreases the readability
+of code significantly. In fact, this made me look twice at the code to check
+whether it really does what it should be doing. Fix this.
+
+And in the same file, there are some extra whitespaces. Get rid of them too.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 341d4166175e9b7911444f5a33b1c9efb8f15c85)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/main.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 36e0f09..7d36fb3 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -3,7 +3,7 @@
+  *
+  * Copyright (c) 2003 Patrick Mochel
+  * Copyright (c) 2003 Open Source Development Lab
+- * 
++ *
+  * This file is released under the GPLv2
+  *
+  */
+@@ -240,7 +240,7 @@ struct kobject *power_kobj;
+  *    'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
+  *    'disk' (Suspend-to-Disk).
+  *
+- *    store() accepts one of those strings, translates it into the 
++ *    store() accepts one of those strings, translates it into the
+  *    proper enumerated value, and initiates a suspend transition.
+  */
+ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+       /* First, check if we are requested to hibernate */
+       if (len == 4 && !strncmp(buf, "disk", len)) {
+               error = hibernate();
+-  goto Exit;
++              goto Exit;
+       }
+ #ifdef CONFIG_SUSPEND
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0129-PM-Sleep-Remove-unnecessary-label-and-jumps-to-it-fo.patch b/patches.runtime_pm/0129-PM-Sleep-Remove-unnecessary-label-and-jumps-to-it-fo.patch
new file mode 100644 (file)
index 0000000..759b468
--- /dev/null
@@ -0,0 +1,57 @@
+From 0312512b537f69de8a744eb82fa888c44c04e6c1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 21 Nov 2011 23:33:28 +0100
+Subject: PM / Sleep: Remove unnecessary label and jumps to it form PM core
+ code
+
+The "End" label in device_prepare() in drivers/base/power/main.c is
+not necessary and the jumps to it have no real effect, so remove them
+all.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit d74e278aaf3b0fe4b02af67055aa71babcc0cebe)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/main.c |    7 -------
+ 1 file changed, 7 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c3d2dfc..1172aea 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1033,22 +1033,16 @@ static int device_prepare(struct device *dev, pm_message_t state)
+               if (dev->pm_domain->ops.prepare)
+                       error = dev->pm_domain->ops.prepare(dev);
+               suspend_report_result(dev->pm_domain->ops.prepare, error);
+-              if (error)
+-                      goto End;
+       } else if (dev->type && dev->type->pm) {
+               pm_dev_dbg(dev, state, "preparing type ");
+               if (dev->type->pm->prepare)
+                       error = dev->type->pm->prepare(dev);
+               suspend_report_result(dev->type->pm->prepare, error);
+-              if (error)
+-                      goto End;
+       } else if (dev->class && dev->class->pm) {
+               pm_dev_dbg(dev, state, "preparing class ");
+               if (dev->class->pm->prepare)
+                       error = dev->class->pm->prepare(dev);
+               suspend_report_result(dev->class->pm->prepare, error);
+-              if (error)
+-                      goto End;
+       } else if (dev->bus && dev->bus->pm) {
+               pm_dev_dbg(dev, state, "preparing ");
+               if (dev->bus->pm->prepare)
+@@ -1056,7 +1050,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
+               suspend_report_result(dev->bus->pm->prepare, error);
+       }
+- End:
+       device_unlock(dev);
+       return error;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0130-PM-Sleep-Simplify-device_suspend_noirq.patch b/patches.runtime_pm/0130-PM-Sleep-Simplify-device_suspend_noirq.patch
new file mode 100644 (file)
index 0000000..5771ea0
--- /dev/null
@@ -0,0 +1,58 @@
+From 9d07decc482d8e2e0b15f789e8b4846756ae4a45 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 21 Nov 2011 23:33:55 +0100
+Subject: PM / Sleep: Simplify device_suspend_noirq()
+
+Remove a few if () and return statements in device_suspend_noirq()
+that aren't really necessary.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit 64e94aafb6a5c4f419e9b8f93950914b5ac162a9)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/main.c |   12 ++----------
+ 1 file changed, 2 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 1172aea..406f82c 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -763,31 +763,23 @@ static pm_message_t resume_event(pm_message_t sleep_state)
+  */
+ static int device_suspend_noirq(struct device *dev, pm_message_t state)
+ {
+-      int error;
++      int error = 0;
+       if (dev->pm_domain) {
+               pm_dev_dbg(dev, state, "LATE power domain ");
+               error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+-              if (error)
+-                      return error;
+       } else if (dev->type && dev->type->pm) {
+               pm_dev_dbg(dev, state, "LATE type ");
+               error = pm_noirq_op(dev, dev->type->pm, state);
+-              if (error)
+-                      return error;
+       } else if (dev->class && dev->class->pm) {
+               pm_dev_dbg(dev, state, "LATE class ");
+               error = pm_noirq_op(dev, dev->class->pm, state);
+-              if (error)
+-                      return error;
+       } else if (dev->bus && dev->bus->pm) {
+               pm_dev_dbg(dev, state, "LATE ");
+               error = pm_noirq_op(dev, dev->bus->pm, state);
+-              if (error)
+-                      return error;
+       }
+-      return 0;
++      return error;
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0131-PM-Hibernate-Refactor-and-simplify-hibernation_snaps.patch b/patches.runtime_pm/0131-PM-Hibernate-Refactor-and-simplify-hibernation_snaps.patch
new file mode 100644 (file)
index 0000000..424d6ff
--- /dev/null
@@ -0,0 +1,80 @@
+From 33ccf9b8390f1510d56a21cb9070a679ea26c4fe Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Tue, 22 Nov 2011 23:20:31 +0100
+Subject: PM / Hibernate: Refactor and simplify hibernation_snapshot() code
+
+The goto statements in hibernation_snapshot() are a bit complex.
+Refactor the code to remove some of them, thereby simplifying the
+implementation.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 953a206393b1533ceb0e7d725cc5a8c8d7ed97dd)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   22 +++++++++-------------
+ 1 file changed, 9 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 863c14d..6495e6a 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -333,7 +333,7 @@ static int create_image(int platform_mode)
+  */
+ int hibernation_snapshot(int platform_mode)
+ {
+-      pm_message_t msg = PMSG_RECOVER;
++      pm_message_t msg;
+       int error;
+       error = platform_begin(platform_mode);
+@@ -362,26 +362,26 @@ int hibernation_snapshot(int platform_mode)
+       error = dpm_prepare(PMSG_FREEZE);
+       if (error) {
+-              dpm_complete(msg);
++              dpm_complete(PMSG_RECOVER);
+               goto Cleanup;
+       }
+       suspend_console();
+       pm_restrict_gfp_mask();
++
+       error = dpm_suspend(PMSG_FREEZE);
+-      if (error)
+-              goto Recover_platform;
+-      if (hibernation_test(TEST_DEVICES))
+-              goto Recover_platform;
++      if (error || hibernation_test(TEST_DEVICES))
++              platform_recover(platform_mode);
++      else
++              error = create_image(platform_mode);
+-      error = create_image(platform_mode);
+       /*
+-       * Control returns here (1) after the image has been created or the
++       * In the case that we call create_image() above, the control
++       * returns here (1) after the image has been created or the
+        * image creation has failed and (2) after a successful restore.
+        */
+- Resume_devices:
+       /* We may need to release the preallocated image pages here. */
+       if (error || !in_suspend)
+               swsusp_free();
+@@ -399,10 +399,6 @@ int hibernation_snapshot(int platform_mode)
+       platform_end(platform_mode);
+       return error;
+- Recover_platform:
+-      platform_recover(platform_mode);
+-      goto Resume_devices;
+-
+  Cleanup:
+       swsusp_free();
+       goto Close;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0132-PM-Domains-Document-how-PM-domains-are-used-by-the-P.patch b/patches.runtime_pm/0132-PM-Domains-Document-how-PM-domains-are-used-by-the-P.patch
new file mode 100644 (file)
index 0000000..011729d
--- /dev/null
@@ -0,0 +1,122 @@
+From a3f7a2775d5a10dcdb30cc3dac113a06a0ad8242 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 23 Nov 2011 21:18:39 +0100
+Subject: PM / Domains: Document how PM domains are used by the PM core
+
+The current power management documentation in Documentation/power/
+either doesn't cover PM domains at all, or gives inaccurate
+information about them, so update the relevant files in there to
+follow the code.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 5841eb6402707a387b216373e65c9c28e8136663)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt    |   42 +++++++++++++++++++++++-------------
+ Documentation/power/runtime_pm.txt |   29 +++++++++++++++----------
+ 2 files changed, 45 insertions(+), 26 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 646a89e..4342acb 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
+ Subsystem-Level Methods
+ -----------------------
+ The core methods to suspend and resume devices reside in struct dev_pm_ops
+-pointed to by the pm member of struct bus_type, struct device_type and
+-struct class.  They are mostly of interest to the people writing infrastructure
+-for buses, like PCI or USB, or device type and device class drivers.
++pointed to by the ops member of struct dev_pm_domain, or by the pm member of
++struct bus_type, struct device_type and struct class.  They are mostly of
++interest to the people writing infrastructure for platforms and buses, like PCI
++or USB, or device type and device class drivers.
+ Bus drivers implement these methods as appropriate for the hardware and the
+ drivers using it; PCI works differently from USB, and so on.  Not many people
+@@ -251,18 +252,29 @@ various phases always run after tasks have been frozen and before they are
+ unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
+ been disabled (except for those marked with the IRQ_WAKEUP flag).
+-All phases use bus, type, or class callbacks (that is, methods defined in
+-dev->bus->pm, dev->type->pm, or dev->class->pm).  These callbacks are mutually
+-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
+-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
+-callbacks included in that object (i.e. dev->type->pm) will be used.  Otherwise,
+-if the class provides a struct dev_pm_ops object pointed to by its pm field
+-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
+-callbacks from that object (i.e. dev->class->pm).  Finally, if the pm fields of
+-both the device type and class objects are NULL (or those objects do not exist),
+-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
+-will be used (this allows device types to override callbacks provided by bus
+-types or classes if necessary).
++All phases use PM domain, bus, type, or class callbacks (that is, methods
++defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
++These callbacks are regarded by the PM core as mutually exclusive.  Moreover,
++PM domain callbacks always take precedence over bus, type and class callbacks,
++while type callbacks take precedence over bus and class callbacks, and class
++callbacks take precedence over bus callbacks.  To be precise, the following
++rules are used to determine which callback to execute in the given phase:
++
++    1.        If dev->pm_domain is present, the PM core will attempt to execute the
++      callback included in dev->pm_domain->ops.  If that callback is not
++      present, no action will be carried out for the given device.
++
++    2.        Otherwise, if both dev->type and dev->type->pm are present, the callback
++      included in dev->type->pm will be executed.
++
++    3.        Otherwise, if both dev->class and dev->class->pm are present, the
++      callback included in dev->class->pm will be executed.
++
++    4.        Otherwise, if both dev->bus and dev->bus->pm are present, the callback
++      included in dev->bus->pm will be executed.
++
++This allows PM domains and device types to override callbacks provided by bus
++types or device classes if necessary.
+ These callbacks may in turn invoke device- or driver-specific methods stored in
+ dev->driver->pm, but they don't have to.
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 5336149..79b10a0 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -44,17 +44,24 @@ struct dev_pm_ops {
+ };
+ The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
+-are executed by the PM core for either the power domain, or the device type
+-(if the device power domain's struct dev_pm_ops does not exist), or the class
+-(if the device power domain's and type's struct dev_pm_ops object does not
+-exist), or the bus type (if the device power domain's, type's and class'
+-struct dev_pm_ops objects do not exist) of the given device, so the priority
+-order of callbacks from high to low is that power domain callbacks, device
+-type callbacks, class callbacks and bus type callbacks, and the high priority
+-one will take precedence over low priority one. The bus type, device type and
+-class callbacks are referred to as subsystem-level callbacks in what follows,
+-and generally speaking, the power domain callbacks are used for representing
+-power domains within a SoC.
++are executed by the PM core for the device's subsystem that may be either of
++the following:
++
++  1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
++     is present.
++
++  2. Device type of the device, if both dev->type and dev->type->pm are present.
++
++  3. Device class of the device, if both dev->class and dev->class->pm are
++     present.
++
++  4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
++
++The PM core always checks which callback to use in the order given above, so the
++priority order of callbacks from high to low is: PM domain, device type, class
++and bus type.  Moreover, the high-priority one will always take precedence over
++a low-priority one.  The PM domain, bus type, device type and class callbacks
++are referred to as subsystem-level callbacks in what follows.
+ By default, the callbacks are always invoked in process context with interrupts
+ enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0133-PM-Sleep-Correct-inaccurate-information-in-devices.t.patch b/patches.runtime_pm/0133-PM-Sleep-Correct-inaccurate-information-in-devices.t.patch
new file mode 100644 (file)
index 0000000..e06cdd0
--- /dev/null
@@ -0,0 +1,46 @@
+From 83931ca2a6a92333544568e4cd6d8b5a19f0e4a5 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 23 Nov 2011 21:19:57 +0100
+Subject: PM / Sleep: Correct inaccurate information in devices.txt
+
+The documentation file Documentation/power/devices.txt contains some
+information that isn't correct any more due to code modifications
+made after that file had been created (or updated last time).  Fix
+this.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit fa8ce723936460fcf7e49f508fd5dbd5125e39c4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 4342acb..ed32288 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -250,7 +250,7 @@ for every device before the next phase begins.  Not all busses or classes
+ support all these callbacks and not all drivers use all the callbacks.  The
+ various phases always run after tasks have been frozen and before they are
+ unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
+-been disabled (except for those marked with the IRQ_WAKEUP flag).
++been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+ All phases use PM domain, bus, type, or class callbacks (that is, methods
+ defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+@@ -295,9 +295,8 @@ When the system goes into the standby or memory sleep state, the phases are:
+       After the prepare callback method returns, no new children may be
+       registered below the device.  The method may also prepare the device or
+-      driver in some way for the upcoming system power transition (for
+-      example, by allocating additional memory required for this purpose), but
+-      it should not put the device into a low-power state.
++      driver in some way for the upcoming system power transition, but it
++      should not put the device into a low-power state.
+     2.        The suspend methods should quiesce the device to stop it from performing
+       I/O.  They also may save the device registers and put it into the
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0134-PM-Runtime-Make-documentation-follow-the-new-behavio.patch b/patches.runtime_pm/0134-PM-Runtime-Make-documentation-follow-the-new-behavio.patch
new file mode 100644 (file)
index 0000000..2a0dcc9
--- /dev/null
@@ -0,0 +1,42 @@
+From 953625cd38cfd676b68c0cc687c2da11683861fd Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 23 Nov 2011 21:20:07 +0100
+Subject: PM / Runtime: Make documentation follow the new behavior of irq_safe
+
+The runtime PM core code behavior related to the power.irq_safe
+device flag has changed recently and the documentation should be
+modified to reflect it.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 907565921966260921e4c4581ed8985ef4cf9a67)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/runtime_pm.txt |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index 79b10a0..c2ae8bf 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -65,11 +65,12 @@ are referred to as subsystem-level callbacks in what follows.
+ By default, the callbacks are always invoked in process context with interrupts
+ enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
+-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
+-callbacks should be invoked in atomic context with interrupts disabled.
+-This implies that these callback routines must not block or sleep, but it also
+-means that the synchronous helper functions listed at the end of Section 4 can
+-be used within an interrupt handler or in an atomic context.
++to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
++->runtime_idle() callbacks may be invoked in atomic context with interrupts
++disabled for a given device.  This implies that the callback routines in
++question must not block or sleep, but it also means that the synchronous helper
++functions listed at the end of Section 4 may be used for that device within an
++interrupt handler or generally in an atomic context.
+ The subsystem-level suspend callback is _entirely_ _responsible_ for handling
+ the suspend of the device as appropriate, which may, but need not include
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0135-PM-Sleep-Update-documentation-related-to-system-wake.patch b/patches.runtime_pm/0135-PM-Sleep-Update-documentation-related-to-system-wake.patch
new file mode 100644 (file)
index 0000000..a27ef7e
--- /dev/null
@@ -0,0 +1,103 @@
+From 8ac6cfa81a4f4fa3995a706409d8768b1bbe4472 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 23 Nov 2011 21:20:15 +0100
+Subject: PM / Sleep: Update documentation related to system wakeup
+
+The system wakeup section of Documentation/power/devices.txt is
+outdated, so make it agree with the current code.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit fafba48d4dd6fcbb1fd7ac4ab0ba22ef45b9796c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt |   60 +++++++++++++++++++++++++--------------
+ 1 file changed, 38 insertions(+), 22 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index ed32288..3139fb5 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -140,41 +140,57 @@ sequencing in the driver model tree.
+ /sys/devices/.../power/wakeup files
+ -----------------------------------
+-All devices in the driver model have two flags to control handling of wakeup
+-events (hardware signals that can force the device and/or system out of a low
+-power state).  These flags are initialized by bus or device driver code using
++All device objects in the driver model contain fields that control the handling
++of system wakeup events (hardware signals that can force the system out of a
++sleep state).  These fields are initialized by bus or device driver code using
+ device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
+ include/linux/pm_wakeup.h.
+-The "can_wakeup" flag just records whether the device (and its driver) can
++The "power.can_wakeup" flag just records whether the device (and its driver) can
+ physically support wakeup events.  The device_set_wakeup_capable() routine
+-affects this flag.  The "should_wakeup" flag controls whether the device should
+-try to use its wakeup mechanism.  device_set_wakeup_enable() affects this flag;
+-for the most part drivers should not change its value.  The initial value of
+-should_wakeup is supposed to be false for the majority of devices; the major
+-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
+-(wake-on-LAN) feature has been set up with ethtool.  It should also default
+-to true for devices that don't generate wakeup requests on their own but merely
+-forward wakeup requests from one bus to another (like PCI bridges).
++affects this flag.  The "power.wakeup" field is a pointer to an object of type
++struct wakeup_source used for controlling whether or not the device should use
++its system wakeup mechanism and for notifying the PM core of system wakeup
++events signaled by the device.  This object is only present for wakeup-capable
++devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
++removed) by device_set_wakeup_capable().
+ Whether or not a device is capable of issuing wakeup events is a hardware
+ matter, and the kernel is responsible for keeping track of it.  By contrast,
+ whether or not a wakeup-capable device should issue wakeup events is a policy
+ decision, and it is managed by user space through a sysfs attribute: the
+-power/wakeup file.  User space can write the strings "enabled" or "disabled" to
+-set or clear the "should_wakeup" flag, respectively.  This file is only present
+-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
+-and is created (or removed) by device_set_wakeup_capable().  Reads from the
+-file will return the corresponding string.
+-
+-The device_may_wakeup() routine returns true only if both flags are set.
++"power/wakeup" file.  User space can write the strings "enabled" or "disabled"
++to it to indicate whether or not, respectively, the device is supposed to signal
++system wakeup.  This file is only present if the "power.wakeup" object exists
++for the given device and is created (or removed) along with that object, by
++device_set_wakeup_capable().  Reads from the file will return the corresponding
++string.
++
++The "power/wakeup" file is supposed to contain the "disabled" string initially
++for the majority of devices; the major exceptions are power buttons, keyboards,
++and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
++ethtool.  It should also default to "enabled" for devices that don't generate
++wakeup requests on their own but merely forward wakeup requests from one bus to
++another (like PCI Express ports).
++
++The device_may_wakeup() routine returns true only if the "power.wakeup" object
++exists and the corresponding "power/wakeup" file contains the string "enabled".
+ This information is used by subsystems, like the PCI bus type code, to see
+ whether or not to enable the devices' wakeup mechanisms.  If device wakeup
+ mechanisms are enabled or disabled directly by drivers, they also should use
+ device_may_wakeup() to decide what to do during a system sleep transition.
+-However for runtime power management, wakeup events should be enabled whenever
+-the device and driver both support them, regardless of the should_wakeup flag.
+-
++Device drivers, however, are not supposed to call device_set_wakeup_enable()
++directly in any case.
++
++It ought to be noted that system wakeup is conceptually different from "remote
++wakeup" used by runtime power management, although it may be supported by the
++same physical mechanism.  Remote wakeup is a feature allowing devices in
++low-power states to trigger specific interrupts to signal conditions in which
++they should be put into the full-power state.  Those interrupts may or may not
++be used to signal system wakeup events, depending on the hardware design.  On
++some systems it is impossible to trigger them from system sleep states.  In any
++case, remote wakeup should always be enabled for runtime power management for
++all devices and drivers that support it.
+ /sys/devices/.../power/control files
+ ------------------------------------
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0136-PM-Update-comments-describing-device-power-managemen.patch b/patches.runtime_pm/0136-PM-Update-comments-describing-device-power-managemen.patch
new file mode 100644 (file)
index 0000000..73cf2e7
--- /dev/null
@@ -0,0 +1,309 @@
+From 33f88463457fc9f37691c1425c34a9481c61dc85 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 23 Nov 2011 21:20:32 +0100
+Subject: PM: Update comments describing device power management callbacks
+
+The comments describing device power management callbacks in
+include/pm.h are outdated and somewhat confusing, so make them
+reflect the reality more accurately.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit f7bc83d87d242917ca0ee041ed509f57f361dd56)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm.h |  229 ++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 134 insertions(+), 95 deletions(-)
+
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 5c4c8b1..3f3ed83 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -54,118 +54,145 @@ typedef struct pm_message {
+ /**
+  * struct dev_pm_ops - device PM callbacks
+  *
+- * Several driver power state transitions are externally visible, affecting
++ * Several device power state transitions are externally visible, affecting
+  * the state of pending I/O queues and (for drivers that touch hardware)
+  * interrupts, wakeups, DMA, and other hardware state.  There may also be
+- * internal transitions to various low power modes, which are transparent
++ * internal transitions to various low-power modes which are transparent
+  * to the rest of the driver stack (such as a driver that's ON gating off
+  * clocks which are not in active use).
+  *
+- * The externally visible transitions are handled with the help of the following
+- * callbacks included in this structure:
+- *
+- * @prepare: Prepare the device for the upcoming transition, but do NOT change
+- *    its hardware state.  Prevent new children of the device from being
+- *    registered after @prepare() returns (the driver's subsystem and
+- *    generally the rest of the kernel is supposed to prevent new calls to the
+- *    probe method from being made too once @prepare() has succeeded).  If
+- *    @prepare() detects a situation it cannot handle (e.g. registration of a
+- *    child already in progress), it may return -EAGAIN, so that the PM core
+- *    can execute it once again (e.g. after the new child has been registered)
+- *    to recover from the race condition.  This method is executed for all
+- *    kinds of suspend transitions and is followed by one of the suspend
+- *    callbacks: @suspend(), @freeze(), or @poweroff().
+- *    The PM core executes @prepare() for all devices before starting to
+- *    execute suspend callbacks for any of them, so drivers may assume all of
+- *    the other devices to be present and functional while @prepare() is being
+- *    executed.  In particular, it is safe to make GFP_KERNEL memory
+- *    allocations from within @prepare().  However, drivers may NOT assume
+- *    anything about the availability of the user space at that time and it
+- *    is not correct to request firmware from within @prepare() (it's too
+- *    late to do that).  [To work around this limitation, drivers may
+- *    register suspend and hibernation notifiers that are executed before the
+- *    freezing of tasks.]
++ * The externally visible transitions are handled with the help of callbacks
++ * included in this structure in such a way that two levels of callbacks are
++ * involved.  First, the PM core executes callbacks provided by PM domains,
++ * device types, classes and bus types.  They are the subsystem-level callbacks
++ * supposed to execute callbacks provided by device drivers, although they may
++ * choose not to do that.  If the driver callbacks are executed, they have to
++ * collaborate with the subsystem-level callbacks to achieve the goals
++ * appropriate for the given system transition, given transition phase and the
++ * subsystem the device belongs to.
++ *
++ * @prepare: The principal role of this callback is to prevent new children of
++ *    the device from being registered after it has returned (the driver's
++ *    subsystem and generally the rest of the kernel is supposed to prevent
++ *    new calls to the probe method from being made too once @prepare() has
++ *    succeeded).  If @prepare() detects a situation it cannot handle (e.g.
++ *    registration of a child already in progress), it may return -EAGAIN, so
++ *    that the PM core can execute it once again (e.g. after a new child has
++ *    been registered) to recover from the race condition.
++ *    This method is executed for all kinds of suspend transitions and is
++ *    followed by one of the suspend callbacks: @suspend(), @freeze(), or
++ *    @poweroff().  The PM core executes subsystem-level @prepare() for all
++ *    devices before starting to invoke suspend callbacks for any of them, so
++ *    generally devices may be assumed to be functional or to respond to
++ *    runtime resume requests while @prepare() is being executed.  However,
++ *    device drivers may NOT assume anything about the availability of user
++ *    space at that time and it is NOT valid to request firmware from within
++ *    @prepare() (it's too late to do that).  It also is NOT valid to allocate
++ *    substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
++ *    [To work around these limitations, drivers may register suspend and
++ *    hibernation notifiers to be executed before the freezing of tasks.]
+  *
+  * @complete: Undo the changes made by @prepare().  This method is executed for
+  *    all kinds of resume transitions, following one of the resume callbacks:
+  *    @resume(), @thaw(), @restore().  Also called if the state transition
+- *    fails before the driver's suspend callback (@suspend(), @freeze(),
+- *    @poweroff()) can be executed (e.g. if the suspend callback fails for one
++ *    fails before the driver's suspend callback: @suspend(), @freeze() or
++ *    @poweroff(), can be executed (e.g. if the suspend callback fails for one
+  *    of the other devices that the PM core has unsuccessfully attempted to
+  *    suspend earlier).
+- *    The PM core executes @complete() after it has executed the appropriate
+- *    resume callback for all devices.
++ *    The PM core executes subsystem-level @complete() after it has executed
++ *    the appropriate resume callbacks for all devices.
+  *
+  * @suspend: Executed before putting the system into a sleep state in which the
+- *    contents of main memory are preserved.  Quiesce the device, put it into
+- *    a low power state appropriate for the upcoming system state (such as
+- *    PCI_D3hot), and enable wakeup events as appropriate.
++ *    contents of main memory are preserved.  The exact action to perform
++ *    depends on the device's subsystem (PM domain, device type, class or bus
++ *    type), but generally the device must be quiescent after subsystem-level
++ *    @suspend() has returned, so that it doesn't do any I/O or DMA.
++ *    Subsystem-level @suspend() is executed for all devices after invoking
++ *    subsystem-level @prepare() for all of them.
+  *
+  * @resume: Executed after waking the system up from a sleep state in which the
+- *    contents of main memory were preserved.  Put the device into the
+- *    appropriate state, according to the information saved in memory by the
+- *    preceding @suspend().  The driver starts working again, responding to
+- *    hardware events and software requests.  The hardware may have gone
+- *    through a power-off reset, or it may have maintained state from the
+- *    previous suspend() which the driver may rely on while resuming.  On most
+- *    platforms, there are no restrictions on availability of resources like
+- *    clocks during @resume().
++ *    contents of main memory were preserved.  The exact action to perform
++ *    depends on the device's subsystem, but generally the driver is expected
++ *    to start working again, responding to hardware events and software
++ *    requests (the device itself may be left in a low-power state, waiting
++ *    for a runtime resume to occur).  The state of the device at the time its
++ *    driver's @resume() callback is run depends on the platform and subsystem
++ *    the device belongs to.  On most platforms, there are no restrictions on
++ *    availability of resources like clocks during @resume().
++ *    Subsystem-level @resume() is executed for all devices after invoking
++ *    subsystem-level @resume_noirq() for all of them.
+  *
+  * @freeze: Hibernation-specific, executed before creating a hibernation image.
+- *    Quiesce operations so that a consistent image can be created, but do NOT
+- *    otherwise put the device into a low power device state and do NOT emit
+- *    system wakeup events.  Save in main memory the device settings to be
+- *    used by @restore() during the subsequent resume from hibernation or by
+- *    the subsequent @thaw(), if the creation of the image or the restoration
+- *    of main memory contents from it fails.
++ *    Analogous to @suspend(), but it should not enable the device to signal
++ *    wakeup events or change its power state.  The majority of subsystems
++ *    (with the notable exception of the PCI bus type) expect the driver-level
++ *    @freeze() to save the device settings in memory to be used by @restore()
++ *    during the subsequent resume from hibernation.
++ *    Subsystem-level @freeze() is executed for all devices after invoking
++ *    subsystem-level @prepare() for all of them.
+  *
+  * @thaw: Hibernation-specific, executed after creating a hibernation image OR
+- *    if the creation of the image fails.  Also executed after a failing
++ *    if the creation of an image has failed.  Also executed after a failing
+  *    attempt to restore the contents of main memory from such an image.
+  *    Undo the changes made by the preceding @freeze(), so the device can be
+  *    operated in the same way as immediately before the call to @freeze().
++ *    Subsystem-level @thaw() is executed for all devices after invoking
++ *    subsystem-level @thaw_noirq() for all of them.  It also may be executed
++ *    directly after @freeze() in case of a transition error.
+  *
+  * @poweroff: Hibernation-specific, executed after saving a hibernation image.
+- *    Quiesce the device, put it into a low power state appropriate for the
+- *    upcoming system state (such as PCI_D3hot), and enable wakeup events as
+- *    appropriate.
++ *    Analogous to @suspend(), but it need not save the device's settings in
++ *    memory.
++ *    Subsystem-level @poweroff() is executed for all devices after invoking
++ *    subsystem-level @prepare() for all of them.
+  *
+  * @restore: Hibernation-specific, executed after restoring the contents of main
+- *    memory from a hibernation image.  Driver starts working again,
+- *    responding to hardware events and software requests.  Drivers may NOT
+- *    make ANY assumptions about the hardware state right prior to @restore().
+- *    On most platforms, there are no restrictions on availability of
+- *    resources like clocks during @restore().
+- *
+- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
+- *    actions required for suspending the device that need interrupts to be
+- *    disabled
+- *
+- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
+- *    actions required for resuming the device that need interrupts to be
+- *    disabled
+- *
+- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
+- *    actions required for freezing the device that need interrupts to be
+- *    disabled
+- *
+- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
+- *    actions required for thawing the device that need interrupts to be
+- *    disabled
+- *
+- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
+- *    actions required for handling the device that need interrupts to be
+- *    disabled
+- *
+- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
+- *    actions required for restoring the operations of the device that need
+- *    interrupts to be disabled
++ *    memory from a hibernation image, analogous to @resume().
++ *
++ * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
++ *    additional operations required for suspending the device that might be
++ *    racing with its driver's interrupt handler, which is guaranteed not to
++ *    run while @suspend_noirq() is being executed.
++ *    It generally is expected that the device will be in a low-power state
++ *    (appropriate for the target system sleep state) after subsystem-level
++ *    @suspend_noirq() has returned successfully.  If the device can generate
++ *    system wakeup signals and is enabled to wake up the system, it should be
++ *    configured to do so at that time.  However, depending on the platform
++ *    and device's subsystem, @suspend() may be allowed to put the device into
++ *    the low-power state and configure it to generate wakeup signals, in
++ *    which case it generally is not necessary to define @suspend_noirq().
++ *
++ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
++ *    operations required for resuming the device that might be racing with
++ *    its driver's interrupt handler, which is guaranteed not to run while
++ *    @resume_noirq() is being executed.
++ *
++ * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
++ *    additional operations required for freezing the device that might be
++ *    racing with its driver's interrupt handler, which is guaranteed not to
++ *    run while @freeze_noirq() is being executed.
++ *    The power state of the device should not be changed by either @freeze()
++ *    or @freeze_noirq() and it should not be configured to signal system
++ *    wakeup by any of these callbacks.
++ *
++ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
++ *    operations required for thawing the device that might be racing with its
++ *    driver's interrupt handler, which is guaranteed not to run while
++ *    @thaw_noirq() is being executed.
++ *
++ * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
++ *    @suspend_noirq(), but it need not save the device's settings in memory.
++ *
++ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
++ *    operations required for thawing the device that might be racing with its
++ *    driver's interrupt handler, which is guaranteed not to run while
++ *    @restore_noirq() is being executed.  Analogous to @resume_noirq().
+  *
+  * All of the above callbacks, except for @complete(), return error codes.
+  * However, the error codes returned by the resume operations, @resume(),
+- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
++ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
+  * not cause the PM core to abort the resume transition during which they are
+- * returned.  The error codes returned in that cases are only printed by the PM
++ * returned.  The error codes returned in those cases are only printed by the PM
+  * core to the system logs for debugging purposes.  Still, it is recommended
+  * that drivers only return error codes from their resume methods in case of an
+  * unrecoverable failure (i.e. when the device being handled refuses to resume
+@@ -174,31 +201,43 @@ typedef struct pm_message {
+  * their children.
+  *
+  * It is allowed to unregister devices while the above callbacks are being
+- * executed.  However, it is not allowed to unregister a device from within any
+- * of its own callbacks.
++ * executed.  However, a callback routine must NOT try to unregister the device
++ * it was called for, although it may unregister children of that device (for
++ * example, if it detects that a child was unplugged while the system was
++ * asleep).
++ *
++ * Refer to Documentation/power/devices.txt for more information about the role
++ * of the above callbacks in the system suspend process.
+  *
+- * There also are the following callbacks related to run-time power management
+- * of devices:
++ * There also are callbacks related to runtime power management of devices.
++ * Again, these callbacks are executed by the PM core only for subsystems
++ * (PM domains, device types, classes and bus types) and the subsystem-level
++ * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
++ * actions to be performed by a device driver's callbacks generally depend on
++ * the platform and subsystem the device belongs to.
+  *
+  * @runtime_suspend: Prepare the device for a condition in which it won't be
+  *    able to communicate with the CPU(s) and RAM due to power management.
+- *    This need not mean that the device should be put into a low power state.
++ *    This need not mean that the device should be put into a low-power state.
+  *    For example, if the device is behind a link which is about to be turned
+  *    off, the device may remain at full power.  If the device does go to low
+- *    power and is capable of generating run-time wake-up events, remote
+- *    wake-up (i.e., a hardware mechanism allowing the device to request a
+- *    change of its power state via a wake-up event, such as PCI PME) should
+- *    be enabled for it.
++ *    power and is capable of generating runtime wakeup events, remote wakeup
++ *    (i.e., a hardware mechanism allowing the device to request a change of
++ *    its power state via an interrupt) should be enabled for it.
+  *
+  * @runtime_resume: Put the device into the fully active state in response to a
+- *    wake-up event generated by hardware or at the request of software.  If
+- *    necessary, put the device into the full power state and restore its
++ *    wakeup event generated by hardware or at the request of software.  If
++ *    necessary, put the device into the full-power state and restore its
+  *    registers, so that it is fully operational.
+  *
+- * @runtime_idle: Device appears to be inactive and it might be put into a low
+- *    power state if all of the necessary conditions are satisfied.  Check
++ * @runtime_idle: Device appears to be inactive and it might be put into a
++ *    low-power state if all of the necessary conditions are satisfied.  Check
+  *    these conditions and handle the device as appropriate, possibly queueing
+  *    a suspend request for it.  The return value is ignored by the PM core.
++ *
++ * Refer to Documentation/power/runtime_pm.txt for more information about the
++ * role of the above callbacks in device runtime power management.
++ *
+  */
+ struct dev_pm_ops {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0137-PM-Runtime-Use-device-PM-QoS-constraints-v2.patch b/patches.runtime_pm/0137-PM-Runtime-Use-device-PM-QoS-constraints-v2.patch
new file mode 100644 (file)
index 0000000..7eed598
--- /dev/null
@@ -0,0 +1,353 @@
+From ad08f99f086365289d67e35570f7a078ccbc96c9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 1 Dec 2011 00:01:31 +0100
+Subject: PM / Runtime: Use device PM QoS constraints (v2)
+
+Make the runtime PM core use device PM QoS constraints to check if
+it is allowed to suspend a given device, so that an error code is
+returned if the device's own PM QoS constraint is negative or one of
+its children has already been suspended for too long.  If this is
+not the case, the maximum estimated time the device is allowed to be
+suspended, computed as the minimum of the device's PM QoS constraint
+and the PM QoS constraints of its children (reduced by the difference
+between the current time and their suspend times) is stored in a new
+device's PM field power.max_time_suspended_ns that can be used by
+the device's subsystem or PM domain to decide whether or not to put
+the device into lower-power (and presumably higher-latency) states
+later (if the constraint is 0, which means "no constraint", the
+power.max_time_suspended_ns is set to -1).
+
+Additionally, the time of execution of the subsystem-level
+.runtime_suspend() callback for the device is recorded in the new
+power.suspend_time field for later use by the device's subsystem or
+PM domain along with power.max_time_suspended_ns (it also is used
+by the core code when the device's parent is suspended).
+
+Introduce a new helper function,
+pm_runtime_update_max_time_suspended(), allowing subsystems and PM
+domains (or device drivers) to update the power.max_time_suspended_ns
+field, for example after changing the power state of a suspended
+device.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 00dc9ad18d707f36b2fb4af98fd2cf0548d2b258)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/qos.c     |   24 ++++---
+ drivers/base/power/runtime.c |  148 ++++++++++++++++++++++++++++++++++++------
+ include/linux/pm.h           |    2 +
+ include/linux/pm_qos.h       |    3 +
+ include/linux/pm_runtime.h   |    5 ++
+ 5 files changed, 154 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index 86de6c5..03f4bd0 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
+ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+ /**
+- * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
++ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
++ * @dev: Device to get the PM QoS constraint value for.
++ *
++ * This routine must be called with dev->power.lock held.
++ */
++s32 __dev_pm_qos_read_value(struct device *dev)
++{
++      struct pm_qos_constraints *c = dev->power.constraints;
++
++      return c ? pm_qos_read_value(c) : 0;
++}
++
++/**
++ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
+  * @dev: Device to get the PM QoS constraint value for.
+  */
+ s32 dev_pm_qos_read_value(struct device *dev)
+ {
+-      struct pm_qos_constraints *c;
+       unsigned long flags;
+-      s32 ret = 0;
++      s32 ret;
+       spin_lock_irqsave(&dev->power.lock, flags);
+-
+-      c = dev->power.constraints;
+-      if (c)
+-              ret = pm_qos_read_value(c);
+-
++      ret = __dev_pm_qos_read_value(dev);
+       spin_unlock_irqrestore(&dev->power.lock, flags);
+       return ret;
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8c78443..068f7ed 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -279,6 +279,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+       return retval != -EACCES ? retval : -EIO;
+ }
++struct rpm_qos_data {
++      ktime_t time_now;
++      s64 constraint_ns;
++};
++
++/**
++ * rpm_update_qos_constraint - Update a given PM QoS constraint data.
++ * @dev: Device whose timing data to use.
++ * @data: PM QoS constraint data to update.
++ *
++ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
++ * to by @data.
++ */
++static int rpm_update_qos_constraint(struct device *dev, void *data)
++{
++      struct rpm_qos_data *qos = data;
++      unsigned long flags;
++      s64 delta_ns;
++      int ret = 0;
++
++      spin_lock_irqsave(&dev->power.lock, flags);
++
++      if (dev->power.max_time_suspended_ns < 0)
++              goto out;
++
++      delta_ns = dev->power.max_time_suspended_ns -
++              ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
++      if (delta_ns <= 0) {
++              ret = -EBUSY;
++              goto out;
++      }
++
++      if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
++              qos->constraint_ns = delta_ns;
++
++ out:
++      spin_unlock_irqrestore(&dev->power.lock, flags);
++
++      return ret;
++}
++
+ /**
+  * rpm_suspend - Carry out runtime suspend of given device.
+  * @dev: Device to suspend.
+@@ -305,6 +346,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ {
+       int (*callback)(struct device *);
+       struct device *parent = NULL;
++      struct rpm_qos_data qos;
+       int retval;
+       trace_rpm_suspend(dev, rpmflags);
+@@ -400,8 +442,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+               goto out;
+       }
++      qos.constraint_ns = __dev_pm_qos_read_value(dev);
++      if (qos.constraint_ns < 0) {
++              /* Negative constraint means "never suspend". */
++              retval = -EPERM;
++              goto out;
++      }
++      qos.constraint_ns *= NSEC_PER_USEC;
++      qos.time_now = ktime_get();
++
+       __update_runtime_status(dev, RPM_SUSPENDING);
++      if (!dev->power.ignore_children) {
++              if (dev->power.irq_safe)
++                      spin_unlock(&dev->power.lock);
++              else
++                      spin_unlock_irq(&dev->power.lock);
++
++              retval = device_for_each_child(dev, &qos,
++                                             rpm_update_qos_constraint);
++
++              if (dev->power.irq_safe)
++                      spin_lock(&dev->power.lock);
++              else
++                      spin_lock_irq(&dev->power.lock);
++
++              if (retval)
++                      goto fail;
++      }
++
++      dev->power.suspend_time = qos.time_now;
++      dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
++
+       if (dev->pm_domain)
+               callback = dev->pm_domain->ops.runtime_suspend;
+       else if (dev->type && dev->type->pm)
+@@ -414,27 +486,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+               callback = NULL;
+       retval = rpm_callback(callback, dev);
+-      if (retval) {
+-              __update_runtime_status(dev, RPM_ACTIVE);
+-              dev->power.deferred_resume = false;
+-              if (retval == -EAGAIN || retval == -EBUSY) {
+-                      dev->power.runtime_error = 0;
++      if (retval)
++              goto fail;
+-                      /*
+-                       * If the callback routine failed an autosuspend, and
+-                       * if the last_busy time has been updated so that there
+-                       * is a new autosuspend expiration time, automatically
+-                       * reschedule another autosuspend.
+-                       */
+-                      if ((rpmflags & RPM_AUTO) &&
+-                          pm_runtime_autosuspend_expiration(dev) != 0)
+-                              goto repeat;
+-              } else {
+-                      pm_runtime_cancel_pending(dev);
+-              }
+-              wake_up_all(&dev->power.wait_queue);
+-              goto out;
+-      }
+  no_callback:
+       __update_runtime_status(dev, RPM_SUSPENDED);
+       pm_runtime_deactivate_timer(dev);
+@@ -466,6 +520,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       return retval;
++
++ fail:
++      __update_runtime_status(dev, RPM_ACTIVE);
++      dev->power.suspend_time = ktime_set(0, 0);
++      dev->power.max_time_suspended_ns = -1;
++      dev->power.deferred_resume = false;
++      if (retval == -EAGAIN || retval == -EBUSY) {
++              dev->power.runtime_error = 0;
++
++              /*
++               * If the callback routine failed an autosuspend, and
++               * if the last_busy time has been updated so that there
++               * is a new autosuspend expiration time, automatically
++               * reschedule another autosuspend.
++               */
++              if ((rpmflags & RPM_AUTO) &&
++                  pm_runtime_autosuspend_expiration(dev) != 0)
++                      goto repeat;
++      } else {
++              pm_runtime_cancel_pending(dev);
++      }
++      wake_up_all(&dev->power.wait_queue);
++      goto out;
+ }
+ /**
+@@ -620,6 +697,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
+       if (dev->power.no_callbacks)
+               goto no_callback;       /* Assume success. */
++      dev->power.suspend_time = ktime_set(0, 0);
++      dev->power.max_time_suspended_ns = -1;
++
+       __update_runtime_status(dev, RPM_RESUMING);
+       if (dev->pm_domain)
+@@ -1279,6 +1359,9 @@ void pm_runtime_init(struct device *dev)
+       setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
+                       (unsigned long)dev);
++      dev->power.suspend_time = ktime_set(0, 0);
++      dev->power.max_time_suspended_ns = -1;
++
+       init_waitqueue_head(&dev->power.wait_queue);
+ }
+@@ -1296,3 +1379,28 @@ void pm_runtime_remove(struct device *dev)
+       if (dev->power.irq_safe && dev->parent)
+               pm_runtime_put_sync(dev->parent);
+ }
++
++/**
++ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
++ * @dev: Device to handle.
++ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
++ *
++ * Update the device's power.max_time_suspended_ns field by subtracting
++ * @delta_ns from it.  The resulting value of power.max_time_suspended_ns is
++ * never negative.
++ */
++void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&dev->power.lock, flags);
++
++      if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
++              if (dev->power.max_time_suspended_ns > delta_ns)
++                      dev->power.max_time_suspended_ns -= delta_ns;
++              else
++                      dev->power.max_time_suspended_ns = 0;
++      }
++
++      spin_unlock_irqrestore(&dev->power.lock, flags);
++}
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 3f3ed83..a7676ef 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -521,6 +521,8 @@ struct dev_pm_info {
+       unsigned long           active_jiffies;
+       unsigned long           suspended_jiffies;
+       unsigned long           accounting_timestamp;
++      ktime_t                 suspend_time;
++      s64                     max_time_suspended_ns;
+ #endif
+       struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
+       struct pm_qos_constraints *constraints;
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 83b0ea3..775a323 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request *req);
+ s32 pm_qos_read_value(struct pm_qos_constraints *c);
++s32 __dev_pm_qos_read_value(struct device *dev);
+ s32 dev_pm_qos_read_value(struct device *dev);
+ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+                          s32 value);
+@@ -119,6 +120,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
+ static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+                       { return 0; }
++static inline s32 __dev_pm_qos_read_value(struct device *dev)
++                      { return 0; }
+ static inline s32 dev_pm_qos_read_value(struct device *dev)
+                       { return 0; }
+ static inline int dev_pm_qos_add_request(struct device *dev,
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index d3085e7..609daae 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
+ extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
+ extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
+ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
++extern void pm_runtime_update_max_time_suspended(struct device *dev,
++                                               s64 delta_ns);
+ static inline bool pm_children_suspended(struct device *dev)
+ {
+@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
+ static inline unsigned long pm_runtime_autosuspend_expiration(
+                               struct device *dev) { return 0; }
++static inline void pm_runtime_update_max_time_suspended(struct device *dev,
++                                                      s64 delta_ns) {}
++
+ #endif /* !CONFIG_PM_RUNTIME */
+ static inline int pm_runtime_idle(struct device *dev)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0138-PM-Domains-Make-it-possible-to-use-per-device-domain.patch b/patches.runtime_pm/0138-PM-Domains-Make-it-possible-to-use-per-device-domain.patch
new file mode 100644 (file)
index 0000000..b69e647
--- /dev/null
@@ -0,0 +1,390 @@
+From 522b7f7f33abce468e7f6395bed696d60f0b830b Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 27 Nov 2011 13:11:36 +0100
+Subject: PM / Domains: Make it possible to use per-device domain callbacks
+
+The current generic PM domains code requires that the same .stop(),
+.start() and .active_wakeup() device callback routines be used for
+all devices in the given domain, which is inflexible and may not
+cover some specific use cases.  For this reason, make it possible to
+use device specific .start()/.stop() and .active_wakeup() callback
+routines by adding corresponding callback pointers to struct
+generic_pm_domain_data.  Add a new helper routine,
+pm_genpd_register_callbacks(), that can be used to populate
+the new per-device callback pointers.
+
+Modify the shmobile's power domains code to allow drivers to add
+their own code to be run during the device stop and start operations
+with the help of the new callback pointers.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit d5e4cbfe2049fca375cb19c4bc0cf676e8b4a88a)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  152 ++++++++++++++++++++++++++++++++-----------
+ include/linux/pm_domain.h   |   27 +++++++-
+ 2 files changed, 139 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 6790cf7..94afaa2 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -15,6 +15,23 @@
+ #include <linux/err.h>
+ #include <linux/sched.h>
+ #include <linux/suspend.h>
++#include <linux/export.h>
++
++#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)                \
++({                                                            \
++      type (*__routine)(struct device *__d);                  \
++      type __ret = (type)0;                                   \
++                                                              \
++      __routine = genpd->dev_ops.callback;                    \
++      if (__routine) {                                        \
++              __ret = __routine(dev);                         \
++      } else {                                                \
++              __routine = dev_gpd_data(dev)->ops.callback;    \
++              if (__routine)                                  \
++                      __ret = __routine(dev);                 \
++      }                                                       \
++      __ret;                                                  \
++})
+ static LIST_HEAD(gpd_list);
+ static DEFINE_MUTEX(gpd_list_lock);
+@@ -29,6 +46,16 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+       return pd_to_genpd(dev->pm_domain);
+ }
++static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
++}
++
++static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, start, dev);
++}
++
+ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+ {
+       bool ret = false;
+@@ -199,13 +226,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
+       mutex_unlock(&genpd->lock);
+       if (drv && drv->pm && drv->pm->runtime_suspend) {
+-              if (genpd->start_device)
+-                      genpd->start_device(dev);
+-
++              genpd_start_dev(genpd, dev);
+               ret = drv->pm->runtime_suspend(dev);
+-
+-              if (genpd->stop_device)
+-                      genpd->stop_device(dev);
++              genpd_stop_dev(genpd, dev);
+       }
+       mutex_lock(&genpd->lock);
+@@ -235,13 +258,9 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
+       mutex_unlock(&genpd->lock);
+       if (drv && drv->pm && drv->pm->runtime_resume) {
+-              if (genpd->start_device)
+-                      genpd->start_device(dev);
+-
++              genpd_start_dev(genpd, dev);
+               drv->pm->runtime_resume(dev);
+-
+-              if (genpd->stop_device)
+-                      genpd->stop_device(dev);
++              genpd_stop_dev(genpd, dev);
+       }
+       mutex_lock(&genpd->lock);
+@@ -413,6 +432,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
+ static int pm_genpd_runtime_suspend(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
++      int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -422,11 +442,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+       might_sleep_if(!genpd->dev_irq_safe);
+-      if (genpd->stop_device) {
+-              int ret = genpd->stop_device(dev);
+-              if (ret)
+-                      return ret;
+-      }
++      ret = genpd_stop_dev(genpd, dev);
++      if (ret)
++              return ret;
+       /*
+        * If power.irq_safe is set, this routine will be run with interrupts
+@@ -502,8 +520,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
+       mutex_unlock(&genpd->lock);
+  out:
+-      if (genpd->start_device)
+-              genpd->start_device(dev);
++      genpd_start_dev(genpd, dev);
+       return 0;
+ }
+@@ -534,6 +551,12 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+ #ifdef CONFIG_PM_SLEEP
++static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
++                                  struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
++}
++
+ /**
+  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
+  * @genpd: PM domain to power off, if possible.
+@@ -590,7 +613,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+       if (!device_can_wakeup(dev))
+               return false;
+-      active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
++      active_wakeup = genpd_dev_active_wakeup(genpd, dev);
+       return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+ }
+@@ -646,7 +669,7 @@ static int pm_genpd_prepare(struct device *dev)
+       /*
+        * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+        * so pm_genpd_poweron() will return immediately, but if the device
+-       * is suspended (e.g. it's been stopped by .stop_device()), we need
++       * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
+        * to make it operational.
+        */
+       pm_runtime_resume(dev);
+@@ -714,12 +737,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+       if (ret)
+               return ret;
+-      if (dev->power.wakeup_path
+-          && genpd->active_wakeup && genpd->active_wakeup(dev))
++      if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+               return 0;
+-      if (genpd->stop_device)
+-              genpd->stop_device(dev);
++      genpd_stop_dev(genpd, dev);
+       /*
+        * Since all of the "noirq" callbacks are executed sequentially, it is
+@@ -761,8 +782,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
+        */
+       pm_genpd_poweron(genpd);
+       genpd->suspended_count--;
+-      if (genpd->start_device)
+-              genpd->start_device(dev);
++      genpd_start_dev(genpd, dev);
+       return pm_generic_resume_noirq(dev);
+ }
+@@ -836,8 +856,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
+       if (ret)
+               return ret;
+-      if (genpd->stop_device)
+-              genpd->stop_device(dev);
++      genpd_stop_dev(genpd, dev);
+       return 0;
+ }
+@@ -864,8 +883,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
+       if (genpd->suspend_power_off)
+               return 0;
+-      if (genpd->start_device)
+-              genpd->start_device(dev);
++      genpd_start_dev(genpd, dev);
+       return pm_generic_thaw_noirq(dev);
+ }
+@@ -938,12 +956,10 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+       if (ret)
+               return ret;
+-      if (dev->power.wakeup_path
+-          && genpd->active_wakeup && genpd->active_wakeup(dev))
++      if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+               return 0;
+-      if (genpd->stop_device)
+-              genpd->stop_device(dev);
++      genpd_stop_dev(genpd, dev);
+       /*
+        * Since all of the "noirq" callbacks are executed sequentially, it is
+@@ -993,8 +1009,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
+       pm_genpd_poweron(genpd);
+       genpd->suspended_count--;
+-      if (genpd->start_device)
+-              genpd->start_device(dev);
++      genpd_start_dev(genpd, dev);
+       return pm_generic_restore_noirq(dev);
+ }
+@@ -1280,6 +1295,69 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ }
+ /**
++ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
++ * @dev: Device to add the callbacks to.
++ * @ops: Set of callbacks to add.
++ */
++int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops)
++{
++      struct pm_domain_data *pdd;
++      int ret = 0;
++
++      if (!(dev && dev->power.subsys_data && ops))
++              return -EINVAL;
++
++      pm_runtime_disable(dev);
++      device_pm_lock();
++
++      pdd = dev->power.subsys_data->domain_data;
++      if (pdd) {
++              struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
++
++              gpd_data->ops = *ops;
++      } else {
++              ret = -EINVAL;
++      }
++
++      device_pm_unlock();
++      pm_runtime_enable(dev);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
++
++/**
++ * pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
++ * @dev: Device to remove the callbacks from.
++ */
++int pm_genpd_remove_callbacks(struct device *dev)
++{
++      struct pm_domain_data *pdd;
++      int ret = 0;
++
++      if (!(dev && dev->power.subsys_data))
++              return -EINVAL;
++
++      pm_runtime_disable(dev);
++      device_pm_lock();
++
++      pdd = dev->power.subsys_data->domain_data;
++      if (pdd) {
++              struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
++
++              gpd_data->ops = (struct gpd_dev_ops){ 0 };
++      } else {
++              ret = -EINVAL;
++      }
++
++      device_pm_unlock();
++      pm_runtime_enable(dev);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(pm_genpd_remove_callbacks);
++
++/**
+  * pm_genpd_init - Initialize a generic I/O PM domain object.
+  * @genpd: PM domain object to initialize.
+  * @gov: PM domain governor to associate with the domain (may be NULL).
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 65633e5..8949d2d 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -23,6 +23,12 @@ struct dev_power_governor {
+       bool (*power_down_ok)(struct dev_pm_domain *domain);
+ };
++struct gpd_dev_ops {
++      int (*start)(struct device *dev);
++      int (*stop)(struct device *dev);
++      bool (*active_wakeup)(struct device *dev);
++};
++
+ struct generic_pm_domain {
+       struct dev_pm_domain domain;    /* PM domain operations */
+       struct list_head gpd_list_node; /* Node in the global PM domains list */
+@@ -45,9 +51,7 @@ struct generic_pm_domain {
+       bool dev_irq_safe;      /* Device callbacks are IRQ-safe */
+       int (*power_off)(struct generic_pm_domain *domain);
+       int (*power_on)(struct generic_pm_domain *domain);
+-      int (*start_device)(struct device *dev);
+-      int (*stop_device)(struct device *dev);
+-      bool (*active_wakeup)(struct device *dev);
++      struct gpd_dev_ops dev_ops;
+ };
+ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+@@ -64,6 +68,7 @@ struct gpd_link {
+ struct generic_pm_domain_data {
+       struct pm_domain_data base;
++      struct gpd_dev_ops ops;
+       bool need_restore;
+ };
+@@ -73,6 +78,11 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
+ }
+ #ifdef CONFIG_PM_GENERIC_DOMAINS
++static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
++{
++      return to_gpd_data(dev->power.subsys_data->domain_data);
++}
++
+ extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                              struct device *dev);
+ extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+@@ -81,6 +91,8 @@ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+                                 struct generic_pm_domain *new_subdomain);
+ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+                                    struct generic_pm_domain *target);
++extern int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops);
++extern int pm_genpd_remove_callbacks(struct device *dev);
+ extern void pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
+ extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+@@ -105,6 +117,15 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ {
+       return -ENOSYS;
+ }
++static inline int pm_genpd_add_callbacks(struct device *dev,
++                                       struct gpd_dev_ops *ops)
++{
++      return -ENOSYS;
++}
++static inline int pm_genpd_remove_callbacks(struct device *dev)
++{
++      return -ENOSYS;
++}
+ static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+                                struct dev_power_governor *gov, bool is_off) {}
+ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0139-PM-Domains-Introduce-save-restore-state-device-callb.patch b/patches.runtime_pm/0139-PM-Domains-Introduce-save-restore-state-device-callb.patch
new file mode 100644 (file)
index 0000000..2a67b0d
--- /dev/null
@@ -0,0 +1,173 @@
+From 6dd0250b29345e51368d082c6bd993f917410029 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 27 Nov 2011 13:11:44 +0100
+Subject: PM / Domains: Introduce "save/restore state" device callbacks
+
+The current PM domains code uses device drivers' .runtime_suspend()
+and .runtime_resume() callbacks as the "save device state" and
+"restore device state" operations, which may not be appropriate in
+general, because it forces drivers to assume that they always will
+be used with generic PM domains.  However, in theory, the same
+hardware may be used in devices that don't belong to any PM
+domain, in which case it would be necessary to add "fake" PM
+domains to satisfy the above assumption.  It also may be located in
+a PM domain that's not handled with the help of the generic code.
+
+To allow device drivers that may be used along with the generic PM
+domains code of more flexibility, introduce new device callbacks,
+.save_state() and .restore_state(), that can be supplied by the
+drivers in addition to their "standard" runtime PM callbacks.  This
+will allow the drivers to be designed to work with generic PM domains
+as well as without them.
+
+For backwards compatibility, introduce default .save_state() and
+.restore_state() callback routines for PM domains that will execute
+a device driver's .runtime_suspend() and .runtime_resume() callbacks,
+respectively, for the given device if the driver doesn't provide its
+own implementations of .save_state() and .restore_state().
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit ecf00475f229fcf06362412ad2d15a3267e354a1)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   68 +++++++++++++++++++++++++++++++++++--------
+ include/linux/pm_domain.h   |    2 ++
+ 2 files changed, 58 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 94afaa2..3c9451b 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -56,6 +56,16 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+       return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+ }
++static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
++}
++
++static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
++}
++
+ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+ {
+       bool ret = false;
+@@ -217,7 +227,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
+ {
+       struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+       struct device *dev = pdd->dev;
+-      struct device_driver *drv = dev->driver;
+       int ret = 0;
+       if (gpd_data->need_restore)
+@@ -225,11 +234,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
+       mutex_unlock(&genpd->lock);
+-      if (drv && drv->pm && drv->pm->runtime_suspend) {
+-              genpd_start_dev(genpd, dev);
+-              ret = drv->pm->runtime_suspend(dev);
+-              genpd_stop_dev(genpd, dev);
+-      }
++      genpd_start_dev(genpd, dev);
++      ret = genpd_save_dev(genpd, dev);
++      genpd_stop_dev(genpd, dev);
+       mutex_lock(&genpd->lock);
+@@ -250,18 +257,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
+ {
+       struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+       struct device *dev = pdd->dev;
+-      struct device_driver *drv = dev->driver;
+       if (!gpd_data->need_restore)
+               return;
+       mutex_unlock(&genpd->lock);
+-      if (drv && drv->pm && drv->pm->runtime_resume) {
+-              genpd_start_dev(genpd, dev);
+-              drv->pm->runtime_resume(dev);
+-              genpd_stop_dev(genpd, dev);
+-      }
++      genpd_start_dev(genpd, dev);
++      genpd_restore_dev(genpd, dev);
++      genpd_stop_dev(genpd, dev);
+       mutex_lock(&genpd->lock);
+@@ -1358,6 +1362,44 @@ int pm_genpd_remove_callbacks(struct device *dev)
+ EXPORT_SYMBOL_GPL(pm_genpd_remove_callbacks);
+ /**
++ * pm_genpd_default_save_state - Default "save device state" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_save_state(struct device *dev)
++{
++      int (*cb)(struct device *__dev);
++      struct device_driver *drv = dev->driver;
++
++      cb = dev_gpd_data(dev)->ops.save_state;
++      if (cb)
++              return cb(dev);
++
++      if (drv && drv->pm && drv->pm->runtime_suspend)
++              return drv->pm->runtime_suspend(dev);
++
++      return 0;
++}
++
++/**
++ * pm_genpd_default_restore_state - Default PM domians "restore device state".
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_restore_state(struct device *dev)
++{
++      int (*cb)(struct device *__dev);
++      struct device_driver *drv = dev->driver;
++
++      cb = dev_gpd_data(dev)->ops.restore_state;
++      if (cb)
++              return cb(dev);
++
++      if (drv && drv->pm && drv->pm->runtime_resume)
++              return drv->pm->runtime_resume(dev);
++
++      return 0;
++}
++
++/**
+  * pm_genpd_init - Initialize a generic I/O PM domain object.
+  * @genpd: PM domain object to initialize.
+  * @gov: PM domain governor to associate with the domain (may be NULL).
+@@ -1400,6 +1442,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+       genpd->domain.ops.restore = pm_genpd_restore;
+       genpd->domain.ops.complete = pm_genpd_complete;
++      genpd->dev_ops.save_state = pm_genpd_default_save_state;
++      genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+       mutex_lock(&gpd_list_lock);
+       list_add(&genpd->gpd_list_node, &gpd_list);
+       mutex_unlock(&gpd_list_lock);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 8949d2d..731080d 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -26,6 +26,8 @@ struct dev_power_governor {
+ struct gpd_dev_ops {
+       int (*start)(struct device *dev);
+       int (*stop)(struct device *dev);
++      int (*save_state)(struct device *dev);
++      int (*restore_state)(struct device *dev);
+       bool (*active_wakeup)(struct device *dev);
+ };
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0140-PM-Domains-Rework-system-suspend-callback-routines-v.patch b/patches.runtime_pm/0140-PM-Domains-Rework-system-suspend-callback-routines-v.patch
new file mode 100644 (file)
index 0000000..8c69d7f
--- /dev/null
@@ -0,0 +1,420 @@
+From b0d643b7f97319158c0dfd2f5e43b33a4d5f0bfc Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 27 Nov 2011 13:11:51 +0100
+Subject: PM / Domains: Rework system suspend callback routines (v2)
+
+The current generic PM domains code attempts to use the generic
+system suspend operations along with the domains' device stop/start
+routines, which requires device drivers to assume that their
+system suspend/resume (and hibernation/restore) callbacks will always
+be used with generic PM domains.  However, in theory, the same
+hardware may be used in devices that don't belong to any PM domain,
+in which case it would be necessary to add "fake" PM domains to
+satisfy the above assumption.  Also, the domain the hardware belongs
+to may not be handled with the help of the generic code.
+
+To allow device drivers that may be used along with the generic PM
+domains code of more flexibility, add new device callbacks,
+.suspend(), .suspend_late(), .resume_early(), .resume(), .freeze(),
+.freeze_late(), .thaw_early(), and .thaw(), that can be supplied by
+the drivers in addition to their "standard" system suspend and
+hibernation callbacks.  These new callbacks, if defined, will be used
+by the generic PM domains code for the handling of system suspend and
+hibernation instead of the "standard" ones.  This will allow drivers
+to be designed to work with generic PM domains as well as without
+them.
+
+For backwards compatibility, introduce default implementations of the
+new callbacks for PM domains that will execute pm_generic_suspend(),
+pm_generic_suspend_noirq(), pm_generic_resume_noirq(),
+pm_generic_resume(), pm_generic_freeze(), pm_generic_freeze_noirq(),
+pm_generic_thaw_noirq(), and pm_generic_thaw(), respectively, for the
+given device if its driver doesn't define those callbacks.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit d23b9b00cdde5c93b914a172cecd57d5625fcd04)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  249 ++++++++++++++++++++++++++-----------------
+ include/linux/pm_domain.h   |    8 ++
+ 2 files changed, 158 insertions(+), 99 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 3c9451b..9a77080 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -561,6 +561,46 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+       return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+ }
++static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
++}
++
++static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
++}
++
++static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
++}
++
++static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
++}
++
++static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
++}
++
++static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
++}
++
++static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
++}
++
++static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
++{
++      return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
++}
++
+ /**
+  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
+  * @genpd: PM domain to power off, if possible.
+@@ -712,7 +752,7 @@ static int pm_genpd_suspend(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
++      return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
+ }
+ /**
+@@ -737,7 +777,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+       if (genpd->suspend_power_off)
+               return 0;
+-      ret = pm_generic_suspend_noirq(dev);
++      ret = genpd_suspend_late(genpd, dev);
+       if (ret)
+               return ret;
+@@ -788,7 +828,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
+       genpd->suspended_count--;
+       genpd_start_dev(genpd, dev);
+-      return pm_generic_resume_noirq(dev);
++      return genpd_resume_early(genpd, dev);
+ }
+ /**
+@@ -809,7 +849,7 @@ static int pm_genpd_resume(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
++      return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
+ }
+ /**
+@@ -830,7 +870,7 @@ static int pm_genpd_freeze(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
++      return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
+ }
+ /**
+@@ -856,7 +896,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
+       if (genpd->suspend_power_off)
+               return 0;
+-      ret = pm_generic_freeze_noirq(dev);
++      ret = genpd_freeze_late(genpd, dev);
+       if (ret)
+               return ret;
+@@ -889,7 +929,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
+       genpd_start_dev(genpd, dev);
+-      return pm_generic_thaw_noirq(dev);
++      return genpd_thaw_early(genpd, dev);
+ }
+ /**
+@@ -910,70 +950,7 @@ static int pm_genpd_thaw(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+-}
+-
+-/**
+- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
+- * @dev: Device to suspend.
+- *
+- * Power off a device under the assumption that its pm_domain field points to
+- * the domain member of an object of type struct generic_pm_domain representing
+- * a PM domain consisting of I/O devices.
+- */
+-static int pm_genpd_dev_poweroff(struct device *dev)
+-{
+-      struct generic_pm_domain *genpd;
+-
+-      dev_dbg(dev, "%s()\n", __func__);
+-
+-      genpd = dev_to_genpd(dev);
+-      if (IS_ERR(genpd))
+-              return -EINVAL;
+-
+-      return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
+-}
+-
+-/**
+- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
+- * @dev: Device to suspend.
+- *
+- * Carry out a late powering off of a device under the assumption that its
+- * pm_domain field points to the domain member of an object of type
+- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+- */
+-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+-{
+-      struct generic_pm_domain *genpd;
+-      int ret;
+-
+-      dev_dbg(dev, "%s()\n", __func__);
+-
+-      genpd = dev_to_genpd(dev);
+-      if (IS_ERR(genpd))
+-              return -EINVAL;
+-
+-      if (genpd->suspend_power_off)
+-              return 0;
+-
+-      ret = pm_generic_poweroff_noirq(dev);
+-      if (ret)
+-              return ret;
+-
+-      if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+-              return 0;
+-
+-      genpd_stop_dev(genpd, dev);
+-
+-      /*
+-       * Since all of the "noirq" callbacks are executed sequentially, it is
+-       * guaranteed that this function will never run twice in parallel for
+-       * the same PM domain, so it is not necessary to use locking here.
+-       */
+-      genpd->suspended_count++;
+-      pm_genpd_sync_poweroff(genpd);
+-
+-      return 0;
++      return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
+ }
+ /**
+@@ -1015,28 +992,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
+       genpd->suspended_count--;
+       genpd_start_dev(genpd, dev);
+-      return pm_generic_restore_noirq(dev);
+-}
+-
+-/**
+- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
+- * @dev: Device to resume.
+- *
+- * Restore a device under the assumption that its pm_domain field points to the
+- * domain member of an object of type struct generic_pm_domain representing
+- * a power domain consisting of I/O devices.
+- */
+-static int pm_genpd_restore(struct device *dev)
+-{
+-      struct generic_pm_domain *genpd;
+-
+-      dev_dbg(dev, "%s()\n", __func__);
+-
+-      genpd = dev_to_genpd(dev);
+-      if (IS_ERR(genpd))
+-              return -EINVAL;
+-
+-      return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
++      return genpd_resume_early(genpd, dev);
+ }
+ /**
+@@ -1086,10 +1042,7 @@ static void pm_genpd_complete(struct device *dev)
+ #define pm_genpd_freeze_noirq         NULL
+ #define pm_genpd_thaw_noirq           NULL
+ #define pm_genpd_thaw                 NULL
+-#define pm_genpd_dev_poweroff_noirq   NULL
+-#define pm_genpd_dev_poweroff         NULL
+ #define pm_genpd_restore_noirq                NULL
+-#define pm_genpd_restore              NULL
+ #define pm_genpd_complete             NULL
+ #endif /* CONFIG_PM_SLEEP */
+@@ -1361,6 +1314,8 @@ int pm_genpd_remove_callbacks(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(pm_genpd_remove_callbacks);
++/* Default device callbacks for generic PM domains. */
++
+ /**
+  * pm_genpd_default_save_state - Default "save device state" for PM domians.
+  * @dev: Device to handle.
+@@ -1400,6 +1355,94 @@ static int pm_genpd_default_restore_state(struct device *dev)
+ }
+ /**
++ * pm_genpd_default_suspend - Default "device suspend" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_suspend(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
++
++      return cb ? cb(dev) : pm_generic_suspend(dev);
++}
++
++/**
++ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_suspend_late(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
++
++      return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
++}
++
++/**
++ * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_resume_early(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
++
++      return cb ? cb(dev) : pm_generic_resume_noirq(dev);
++}
++
++/**
++ * pm_genpd_default_resume - Default "device resume" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_resume(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
++
++      return cb ? cb(dev) : pm_generic_resume(dev);
++}
++
++/**
++ * pm_genpd_default_freeze - Default "device freeze" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_freeze(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
++
++      return cb ? cb(dev) : pm_generic_freeze(dev);
++}
++
++/**
++ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_freeze_late(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
++
++      return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
++}
++
++/**
++ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_thaw_early(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
++
++      return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
++}
++
++/**
++ * pm_genpd_default_thaw - Default "device thaw" for PM domians.
++ * @dev: Device to handle.
++ */
++static int pm_genpd_default_thaw(struct device *dev)
++{
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
++
++      return cb ? cb(dev) : pm_generic_thaw(dev);
++}
++
++/**
+  * pm_genpd_init - Initialize a generic I/O PM domain object.
+  * @genpd: PM domain object to initialize.
+  * @gov: PM domain governor to associate with the domain (may be NULL).
+@@ -1437,13 +1480,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+       genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+       genpd->domain.ops.thaw = pm_genpd_thaw;
+-      genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
+-      genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
++      genpd->domain.ops.poweroff = pm_genpd_suspend;
++      genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
+       genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+-      genpd->domain.ops.restore = pm_genpd_restore;
++      genpd->domain.ops.restore = pm_genpd_resume;
+       genpd->domain.ops.complete = pm_genpd_complete;
+       genpd->dev_ops.save_state = pm_genpd_default_save_state;
+       genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
++      genpd->dev_ops.freeze = pm_genpd_default_suspend;
++      genpd->dev_ops.freeze_late = pm_genpd_default_suspend_late;
++      genpd->dev_ops.thaw_early = pm_genpd_default_resume_early;
++      genpd->dev_ops.thaw = pm_genpd_default_resume;
++      genpd->dev_ops.freeze = pm_genpd_default_freeze;
++      genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
++      genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
++      genpd->dev_ops.thaw = pm_genpd_default_thaw;
+       mutex_lock(&gpd_list_lock);
+       list_add(&genpd->gpd_list_node, &gpd_list);
+       mutex_unlock(&gpd_list_lock);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 731080d..10a197d 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -28,6 +28,14 @@ struct gpd_dev_ops {
+       int (*stop)(struct device *dev);
+       int (*save_state)(struct device *dev);
+       int (*restore_state)(struct device *dev);
++      int (*suspend)(struct device *dev);
++      int (*suspend_late)(struct device *dev);
++      int (*resume_early)(struct device *dev);
++      int (*resume)(struct device *dev);
++      int (*freeze)(struct device *dev);
++      int (*freeze_late)(struct device *dev);
++      int (*thaw_early)(struct device *dev);
++      int (*thaw)(struct device *dev);
+       bool (*active_wakeup)(struct device *dev);
+ };
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0141-PM-Domains-Add-device-stop-governor-function-v4.patch b/patches.runtime_pm/0141-PM-Domains-Add-device-stop-governor-function-v4.patch
new file mode 100644 (file)
index 0000000..fb57af7
--- /dev/null
@@ -0,0 +1,319 @@
+From 28645793f33678ebc0c743228dc2b4a7c609aee8 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 1 Dec 2011 00:02:05 +0100
+Subject: PM / Domains: Add device stop governor function (v4)
+
+Add a function deciding whether or not devices should be stopped in
+pm_genpd_runtime_suspend() depending on their PM QoS constraints
+and stop/start timing values.  Make it possible to add information
+used by this function to device objects.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit b02c999ac325e977585abeb4caf6e0a2ee21e30b)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/Makefile          |    2 +-
+ drivers/base/power/domain.c          |   33 ++++++++++++++----
+ drivers/base/power/domain_governor.c |   33 ++++++++++++++++++
+ include/linux/pm_domain.h            |   63 +++++++++++++++++++++++++++++-----
+ 4 files changed, 115 insertions(+), 16 deletions(-)
+ create mode 100644 drivers/base/power/domain_governor.c
+
+diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
+index 81676dd..2e58ebb 100644
+--- a/drivers/base/power/Makefile
++++ b/drivers/base/power/Makefile
+@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
+ obj-$(CONFIG_PM_RUNTIME)      += runtime.o
+ obj-$(CONFIG_PM_TRACE_RTC)    += trace.o
+ obj-$(CONFIG_PM_OPP)  += opp.o
+-obj-$(CONFIG_PM_GENERIC_DOMAINS)      +=  domain.o
++obj-$(CONFIG_PM_GENERIC_DOMAINS)      +=  domain.o domain_governor.o
+ obj-$(CONFIG_HAVE_CLK)        += clock_ops.o
+ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 9a77080..3af9f5a 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -38,7 +38,7 @@ static DEFINE_MUTEX(gpd_list_lock);
+ #ifdef CONFIG_PM
+-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
++struct generic_pm_domain *dev_to_genpd(struct device *dev)
+ {
+       if (IS_ERR_OR_NULL(dev->pm_domain))
+               return ERR_PTR(-EINVAL);
+@@ -436,6 +436,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
+ static int pm_genpd_runtime_suspend(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
++      bool (*stop_ok)(struct device *__dev);
+       int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -446,10 +447,17 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+       might_sleep_if(!genpd->dev_irq_safe);
++      stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
++      if (stop_ok && !stop_ok(dev))
++              return -EBUSY;
++
+       ret = genpd_stop_dev(genpd, dev);
+       if (ret)
+               return ret;
++      pm_runtime_update_max_time_suspended(dev,
++                              dev_gpd_data(dev)->td.start_latency_ns);
++
+       /*
+        * If power.irq_safe is set, this routine will be run with interrupts
+        * off, so it can't use mutexes.
+@@ -1048,11 +1056,13 @@ static void pm_genpd_complete(struct device *dev)
+ #endif /* CONFIG_PM_SLEEP */
+ /**
+- * pm_genpd_add_device - Add a device to an I/O PM domain.
++ * __pm_genpd_add_device - Add a device to an I/O PM domain.
+  * @genpd: PM domain to add the device to.
+  * @dev: Device to be added.
++ * @td: Set of PM QoS timing parameters to attach to the device.
+  */
+-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
++int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
++                        struct gpd_timing_data *td)
+ {
+       struct generic_pm_domain_data *gpd_data;
+       struct pm_domain_data *pdd;
+@@ -1095,6 +1105,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+       gpd_data->base.dev = dev;
+       gpd_data->need_restore = false;
+       list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
++      if (td)
++              gpd_data->td = *td;
+  out:
+       genpd_release_lock(genpd);
+@@ -1255,8 +1267,10 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+  * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+  * @dev: Device to add the callbacks to.
+  * @ops: Set of callbacks to add.
++ * @td: Timing data to add to the device along with the callbacks (optional).
+  */
+-int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops)
++int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
++                         struct gpd_timing_data *td)
+ {
+       struct pm_domain_data *pdd;
+       int ret = 0;
+@@ -1272,6 +1286,8 @@ int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops)
+               struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+               gpd_data->ops = *ops;
++              if (td)
++                      gpd_data->td = *td;
+       } else {
+               ret = -EINVAL;
+       }
+@@ -1284,10 +1300,11 @@ int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops)
+ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+ /**
+- * pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
++ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+  * @dev: Device to remove the callbacks from.
++ * @clear_td: If set, clear the device's timing data too.
+  */
+-int pm_genpd_remove_callbacks(struct device *dev)
++int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+ {
+       struct pm_domain_data *pdd;
+       int ret = 0;
+@@ -1303,6 +1320,8 @@ int pm_genpd_remove_callbacks(struct device *dev)
+               struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+               gpd_data->ops = (struct gpd_dev_ops){ 0 };
++              if (clear_td)
++                      gpd_data->td = (struct gpd_timing_data){ 0 };
+       } else {
+               ret = -EINVAL;
+       }
+@@ -1312,7 +1331,7 @@ int pm_genpd_remove_callbacks(struct device *dev)
+       return ret;
+ }
+-EXPORT_SYMBOL_GPL(pm_genpd_remove_callbacks);
++EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+ /* Default device callbacks for generic PM domains. */
+diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
+new file mode 100644
+index 0000000..97b21c1
+--- /dev/null
++++ b/drivers/base/power/domain_governor.c
+@@ -0,0 +1,33 @@
++/*
++ * drivers/base/power/domain_governor.c - Governors for device PM domains.
++ *
++ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
++ *
++ * This file is released under the GPLv2.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/pm_domain.h>
++#include <linux/pm_qos.h>
++
++/**
++ * default_stop_ok - Default PM domain governor routine for stopping devices.
++ * @dev: Device to check.
++ */
++bool default_stop_ok(struct device *dev)
++{
++      struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
++              return true;
++
++      return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
++              && td->break_even_ns < dev->power.max_time_suspended_ns;
++}
++
++struct dev_power_governor simple_qos_governor = {
++      .stop_ok = default_stop_ok,
++};
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 10a197d..f6745c2 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -21,6 +21,7 @@ enum gpd_status {
+ struct dev_power_governor {
+       bool (*power_down_ok)(struct dev_pm_domain *domain);
++      bool (*stop_ok)(struct device *dev);
+ };
+ struct gpd_dev_ops {
+@@ -76,9 +77,16 @@ struct gpd_link {
+       struct list_head slave_node;
+ };
++struct gpd_timing_data {
++      s64 stop_latency_ns;
++      s64 start_latency_ns;
++      s64 break_even_ns;
++};
++
+ struct generic_pm_domain_data {
+       struct pm_domain_data base;
+       struct gpd_dev_ops ops;
++      struct gpd_timing_data td;
+       bool need_restore;
+ };
+@@ -93,20 +101,48 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+       return to_gpd_data(dev->power.subsys_data->domain_data);
+ }
+-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
+-                             struct device *dev);
++extern struct dev_power_governor simple_qos_governor;
++
++extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
++extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
++                               struct device *dev,
++                               struct gpd_timing_data *td);
++
++static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
++                                    struct device *dev)
++{
++      return __pm_genpd_add_device(genpd, dev, NULL);
++}
++
+ extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+                                 struct device *dev);
+ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+                                 struct generic_pm_domain *new_subdomain);
+ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+                                    struct generic_pm_domain *target);
+-extern int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops);
+-extern int pm_genpd_remove_callbacks(struct device *dev);
++extern int pm_genpd_add_callbacks(struct device *dev,
++                                struct gpd_dev_ops *ops,
++                                struct gpd_timing_data *td);
++extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
+ extern void pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
++
+ extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
++
++extern bool default_stop_ok(struct device *dev);
++
+ #else
++
++static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
++{
++      return ERR_PTR(-ENOSYS);
++}
++static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
++                                      struct device *dev,
++                                      struct gpd_timing_data *td)
++{
++      return -ENOSYS;
++}
+ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+ {
+@@ -128,22 +164,33 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+       return -ENOSYS;
+ }
+ static inline int pm_genpd_add_callbacks(struct device *dev,
+-                                       struct gpd_dev_ops *ops)
++                                       struct gpd_dev_ops *ops,
++                                       struct gpd_timing_data *td)
+ {
+       return -ENOSYS;
+ }
+-static inline int pm_genpd_remove_callbacks(struct device *dev)
++static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+ {
+       return -ENOSYS;
+ }
+-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+-                               struct dev_power_governor *gov, bool is_off) {}
++static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
++{
++}
+ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+ {
+       return -ENOSYS;
+ }
++static inline bool default_stop_ok(struct device *dev)
++{
++      return false;
++}
+ #endif
++static inline int pm_genpd_remove_callbacks(struct device *dev)
++{
++      return __pm_genpd_remove_callbacks(dev, true);
++}
++
+ #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
+ extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
+ extern void pm_genpd_poweroff_unused(void);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0142-PM-Domains-Add-default-power-off-governor-function-v.patch b/patches.runtime_pm/0142-PM-Domains-Add-default-power-off-governor-function-v.patch
new file mode 100644 (file)
index 0000000..8492152
--- /dev/null
@@ -0,0 +1,207 @@
+From 6748d6293797c1159618df2970e5249843449111 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 1 Dec 2011 00:02:10 +0100
+Subject: PM / Domains: Add default power off governor function (v4)
+
+Add a function deciding whether or not a given PM domain should
+be powered off on the basis of the PM QoS constraints of devices
+belonging to it and their PM QoS timing data.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 221e9b58380abdd6c05e11b4538597e2586ee141)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c          |   12 ++++
+ drivers/base/power/domain_governor.c |  110 ++++++++++++++++++++++++++++++++++
+ include/linux/pm_domain.h            |    7 +++
+ 3 files changed, 129 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 3af9f5a..9189619 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -398,6 +398,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       }
+       genpd->status = GPD_STATE_POWER_OFF;
++      genpd->power_off_time = ktime_get();
++
++      /* Update PM QoS information for devices in the domain. */
++      list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
++              struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
++
++              pm_runtime_update_max_time_suspended(pdd->dev,
++                                      td->start_latency_ns +
++                                      td->restore_state_latency_ns +
++                                      genpd->power_on_latency_ns);
++      }
+       list_for_each_entry(link, &genpd->slave_links, slave_node) {
+               genpd_sd_counter_dec(link->master);
+@@ -1487,6 +1498,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->resume_count = 0;
+       genpd->device_count = 0;
+       genpd->suspended_count = 0;
++      genpd->max_off_time_ns = -1;
+       genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+       genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+       genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
+index 97b21c1..da78540 100644
+--- a/drivers/base/power/domain_governor.c
++++ b/drivers/base/power/domain_governor.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/pm_domain.h>
+ #include <linux/pm_qos.h>
++#include <linux/hrtimer.h>
+ /**
+  * default_stop_ok - Default PM domain governor routine for stopping devices.
+@@ -28,6 +29,115 @@ bool default_stop_ok(struct device *dev)
+               && td->break_even_ns < dev->power.max_time_suspended_ns;
+ }
++/**
++ * default_power_down_ok - Default generic PM domain power off governor routine.
++ * @pd: PM domain to check.
++ *
++ * This routine must be executed under the PM domain's lock.
++ */
++static bool default_power_down_ok(struct dev_pm_domain *pd)
++{
++      struct generic_pm_domain *genpd = pd_to_genpd(pd);
++      struct gpd_link *link;
++      struct pm_domain_data *pdd;
++      s64 min_dev_off_time_ns;
++      s64 off_on_time_ns;
++      ktime_t time_now = ktime_get();
++
++      off_on_time_ns = genpd->power_off_latency_ns +
++                              genpd->power_on_latency_ns;
++      /*
++       * It doesn't make sense to remove power from the domain if saving
++       * the state of all devices in it and the power off/power on operations
++       * take too much time.
++       *
++       * All devices in this domain have been stopped already at this point.
++       */
++      list_for_each_entry(pdd, &genpd->dev_list, list_node) {
++              if (pdd->dev->driver)
++                      off_on_time_ns +=
++                              to_gpd_data(pdd)->td.save_state_latency_ns;
++      }
++
++      /*
++       * Check if subdomains can be off for enough time.
++       *
++       * All subdomains have been powered off already at this point.
++       */
++      list_for_each_entry(link, &genpd->master_links, master_node) {
++              struct generic_pm_domain *sd = link->slave;
++              s64 sd_max_off_ns = sd->max_off_time_ns;
++
++              if (sd_max_off_ns < 0)
++                      continue;
++
++              sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
++                                                     sd->power_off_time));
++              /*
++               * Check if the subdomain is allowed to be off long enough for
++               * the current domain to turn off and on (that's how much time
++               * it will have to wait worst case).
++               */
++              if (sd_max_off_ns <= off_on_time_ns)
++                      return false;
++      }
++
++      /*
++       * Check if the devices in the domain can be off enough time.
++       */
++      min_dev_off_time_ns = -1;
++      list_for_each_entry(pdd, &genpd->dev_list, list_node) {
++              struct gpd_timing_data *td;
++              struct device *dev = pdd->dev;
++              s64 dev_off_time_ns;
++
++              if (!dev->driver || dev->power.max_time_suspended_ns < 0)
++                      continue;
++
++              td = &to_gpd_data(pdd)->td;
++              dev_off_time_ns = dev->power.max_time_suspended_ns -
++                      (td->start_latency_ns + td->restore_state_latency_ns +
++                              ktime_to_ns(ktime_sub(time_now,
++                                              dev->power.suspend_time)));
++              if (dev_off_time_ns <= off_on_time_ns)
++                      return false;
++
++              if (min_dev_off_time_ns > dev_off_time_ns
++                  || min_dev_off_time_ns < 0)
++                      min_dev_off_time_ns = dev_off_time_ns;
++      }
++
++      if (min_dev_off_time_ns < 0) {
++              /*
++               * There are no latency constraints, so the domain can spend
++               * arbitrary time in the "off" state.
++               */
++              genpd->max_off_time_ns = -1;
++              return true;
++      }
++
++      /*
++       * The difference between the computed minimum delta and the time needed
++       * to turn the domain on is the maximum theoretical time this domain can
++       * spend in the "off" state.
++       */
++      min_dev_off_time_ns -= genpd->power_on_latency_ns;
++
++      /*
++       * If the difference between the computed minimum delta and the time
++       * needed to turn the domain off and back on on is smaller than the
++       * domain's power break even time, removing power from the domain is not
++       * worth it.
++       */
++      if (genpd->break_even_ns >
++          min_dev_off_time_ns - genpd->power_off_latency_ns)
++              return false;
++
++      genpd->max_off_time_ns = min_dev_off_time_ns;
++      return true;
++}
++
+ struct dev_power_governor simple_qos_governor = {
+       .stop_ok = default_stop_ok,
++      .power_down_ok = default_power_down_ok,
+ };
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index f6745c2..cc1a245 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -61,8 +61,13 @@ struct generic_pm_domain {
+       bool suspend_power_off; /* Power status before system suspend */
+       bool dev_irq_safe;      /* Device callbacks are IRQ-safe */
+       int (*power_off)(struct generic_pm_domain *domain);
++      s64 power_off_latency_ns;
+       int (*power_on)(struct generic_pm_domain *domain);
++      s64 power_on_latency_ns;
+       struct gpd_dev_ops dev_ops;
++      s64 break_even_ns;      /* Power break even for the entire domain. */
++      s64 max_off_time_ns;    /* Maximum allowed "suspended" time. */
++      ktime_t power_off_time;
+ };
+ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+@@ -80,6 +85,8 @@ struct gpd_link {
+ struct gpd_timing_data {
+       s64 stop_latency_ns;
+       s64 start_latency_ns;
++      s64 save_state_latency_ns;
++      s64 restore_state_latency_ns;
+       s64 break_even_ns;
+ };
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0143-PM-Domains-Automatically-update-overoptimistic-laten.patch b/patches.runtime_pm/0143-PM-Domains-Automatically-update-overoptimistic-laten.patch
new file mode 100644 (file)
index 0000000..72cd3fb
--- /dev/null
@@ -0,0 +1,128 @@
+From 17a958115eaf26f242853d26d79463f9978397d3 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 1 Dec 2011 00:02:17 +0100
+Subject: PM / Domains: Automatically update overoptimistic latency
+ information
+
+Measure the time of execution of the .stop(), .start(), .save_state()
+and .restore_state() PM domain device callbacks and if the result
+is greater than the corresponding latency value stored in the
+device's struct generic_pm_domain_data object, replace the inaccurate
+value with the measured time.
+
+Do analogously for the PM domains' .power_off() and .power_off()
+callbacks.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 0140d8bd47f798d55c3720f7fcade9e50929a5e5)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   43 +++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 39 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 9189619..5a8d67d 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -33,6 +33,20 @@
+       __ret;                                                  \
+ })
++#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)     \
++({                                                                            \
++      ktime_t __start = ktime_get();                                          \
++      type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
++      s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
++      struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);          \
++      if (__elapsed > __gpd_data->td.field) {                                 \
++              __gpd_data->td.field = __elapsed;                               \
++              dev_warn(dev, name " latency exceeded, new value %lld ns\n",    \
++                      __elapsed);                                             \
++      }                                                                       \
++      __retval;                                                               \
++})
++
+ static LIST_HEAD(gpd_list);
+ static DEFINE_MUTEX(gpd_list_lock);
+@@ -48,22 +62,27 @@ struct generic_pm_domain *dev_to_genpd(struct device *dev)
+ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+ {
+-      return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
++      return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
++                                      stop_latency_ns, "stop");
+ }
+ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+ {
+-      return GENPD_DEV_CALLBACK(genpd, int, start, dev);
++      return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
++                                      start_latency_ns, "start");
+ }
+ static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+ {
+-      return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
++      return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
++                                      save_state_latency_ns, "state save");
+ }
+ static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+ {
+-      return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
++      return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
++                                      restore_state_latency_ns,
++                                      "state restore");
+ }
+ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+@@ -182,9 +201,16 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+       }
+       if (genpd->power_on) {
++              ktime_t time_start = ktime_get();
++              s64 elapsed_ns;
++
+               ret = genpd->power_on(genpd);
+               if (ret)
+                       goto err;
++
++              elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
++              if (elapsed_ns > genpd->power_on_latency_ns)
++                      genpd->power_on_latency_ns = elapsed_ns;
+       }
+       genpd_set_active(genpd);
+@@ -377,11 +403,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       }
+       if (genpd->power_off) {
++              ktime_t time_start;
++              s64 elapsed_ns;
++
+               if (atomic_read(&genpd->sd_count) > 0) {
+                       ret = -EBUSY;
+                       goto out;
+               }
++              time_start = ktime_get();
++
+               /*
+                * If sd_count > 0 at this point, one of the subdomains hasn't
+                * managed to call pm_genpd_poweron() for the master yet after
+@@ -395,6 +426,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+                       genpd_set_active(genpd);
+                       goto out;
+               }
++
++              elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
++              if (elapsed_ns > genpd->power_off_latency_ns)
++                      genpd->power_off_latency_ns = elapsed_ns;
+       }
+       genpd->status = GPD_STATE_POWER_OFF;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0144-PM-Domains-fix-compilation-failure-for-CONFIG_PM_GEN.patch b/patches.runtime_pm/0144-PM-Domains-fix-compilation-failure-for-CONFIG_PM_GEN.patch
new file mode 100644 (file)
index 0000000..12b49e0
--- /dev/null
@@ -0,0 +1,44 @@
+From 4118e8ccd6bc292ac3ea0222ab334e62f54d8600 Mon Sep 17 00:00:00 2001
+From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Date: Thu, 1 Dec 2011 00:05:31 +0100
+Subject: PM / Domains: fix compilation failure for CONFIG_PM_GENERIC_DOMAINS
+ unset
+
+Fix the following compalitaion breakage:
+
+In file included from linux/drivers/sh/pm_runtime.c:15:
+linux/include/linux/pm_domain.h: In function 'dev_to_genpd':
+linux/include/linux/pm_domain.h:142: error: implicit declaration of function 'ERR_PTR'
+linux/include/linux/pm_domain.h:142: warning: return makes pointer from integer without a cast
+In file included from linux/include/linux/sh_clk.h:10,
+                 from linux/drivers/sh/pm_runtime.c:19:
+linux/include/linux/err.h: At top level:
+linux/include/linux/err.h:22: error: conflicting types for 'ERR_PTR'
+linux/include/linux/pm_domain.h:142: note: previous implicit declaration of 'ERR_PTR' was here
+make[3]: *** [drivers/sh/pm_runtime.o] Error 1
+
+Reported-by: Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 4f042cdad40e1566a53b7ae85e72b6945a4b0fde)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_domain.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index cc1a245..fbb81bc 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -10,6 +10,7 @@
+ #define _LINUX_PM_DOMAIN_H
+ #include <linux/device.h>
++#include <linux/err.h>
+ enum gpd_status {
+       GPD_STATE_ACTIVE = 0,   /* PM domain is active */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0145-regulator-Fix-regulator_register-API-signature-in-Do.patch b/patches.runtime_pm/0145-regulator-Fix-regulator_register-API-signature-in-Do.patch
new file mode 100644 (file)
index 0000000..ea60020
--- /dev/null
@@ -0,0 +1,37 @@
+From 296366bad77047982adf3d08f0b33d7882712fb1 Mon Sep 17 00:00:00 2001
+From: Rajendra Nayak <rnayak@ti.com>
+Date: Mon, 5 Dec 2011 12:47:42 +0530
+Subject: regulator: Fix regulator_register() API signature in Documentation
+
+The commit 2c043bcbf287 ("regulator: pass additional of_node to
+regulator_register()") added an additional parameter to the
+regulator_register() API.
+Update the Documentation accordingly to reflect the change
+in the function signature.
+
+Reported-by: Thomas Abraham <thomas.abraham@linaro.org>
+Signed-off-by: Rajendra Nayak <rnayak@ti.com>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+(cherry picked from commit 068df0f34e81bc06c5eb5012ec2eda25624e87aa)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/regulator/regulator.txt |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/power/regulator/regulator.txt b/Documentation/power/regulator/regulator.txt
+index 3f8b528..e272d99 100644
+--- a/Documentation/power/regulator/regulator.txt
++++ b/Documentation/power/regulator/regulator.txt
+@@ -12,7 +12,7 @@ Drivers can register a regulator by calling :-
+ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
+       struct device *dev, struct regulator_init_data *init_data,
+-      void *driver_data);
++      void *driver_data, struct device_node *of_node);
+ This will register the regulators capabilities and operations to the regulator
+ core.
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0146-PM-Hibernate-Enable-usermodehelpers-in-software_resu.patch b/patches.runtime_pm/0146-PM-Hibernate-Enable-usermodehelpers-in-software_resu.patch
new file mode 100644 (file)
index 0000000..4b96acb
--- /dev/null
@@ -0,0 +1,38 @@
+From 5e8dc14ce6dff581023321c949c682d1baa263bb Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 1 Dec 2011 22:32:43 +0100
+Subject: PM / Hibernate: Enable usermodehelpers in software_resume() error
+ path
+
+In the software_resume() function defined in kernel/power/hibernate.c,
+if the call to create_basic_memory_bitmaps() fails, the usermodehelpers
+are not enabled (which had been disabled in the previous step). Fix it.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 0118521cc7acb3ccbc1a01d6144ac32be9d56a4c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 6495e6a..4f2ebf4 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -797,8 +797,10 @@ static int software_resume(void)
+               goto close_finish;
+       error = create_basic_memory_bitmaps();
+-      if (error)
++      if (error) {
++              usermodehelper_enable();
+               goto close_finish;
++      }
+       pr_debug("PM: Preparing processes for restore.\n");
+       error = freeze_processes();
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0147-PM-Hibernate-Thaw-processes-in-SNAPSHOT_CREATE_IMAGE.patch b/patches.runtime_pm/0147-PM-Hibernate-Thaw-processes-in-SNAPSHOT_CREATE_IMAGE.patch
new file mode 100644 (file)
index 0000000..72e1aa1
--- /dev/null
@@ -0,0 +1,83 @@
+From bd07f032506fb16af54aadcce89fd89d1378526e Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 1 Dec 2011 22:33:10 +0100
+Subject: PM / Hibernate: Thaw processes in SNAPSHOT_CREATE_IMAGE ioctl test
+ path
+
+Commit 2aede851ddf08666f68ffc17be446420e9d2a056 (PM / Hibernate: Freeze
+kernel threads after preallocating memory) moved the freezing of kernel
+threads to hibernation_snapshot() function.
+
+So now, if the call to hibernation_snapshot() returns early due to a
+successful hibernation test, the caller has to thaw processes to ensure
+that the system gets back to its original state.
+
+But in SNAPSHOT_CREATE_IMAGE hibernation ioctl, the caller does not thaw
+processes in case hibernation_snapshot() returned due to a successful
+freezer test. Fix this issue. But note we still send the value of 'in_suspend'
+(which is now 0) to userspace, because we are not in an error path per-se,
+and moreover, the value of in_suspend correctly depicts the situation here.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 97819a26224f019e73d88bb2fd4eb5a614860461)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |    2 +-
+ kernel/power/power.h     |    2 ++
+ kernel/power/user.c      |   11 ++++++++---
+ 3 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 4f2ebf4..19500b5 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -55,7 +55,7 @@ enum {
+ static int hibernation_mode = HIBERNATION_SHUTDOWN;
+-static bool freezer_test_done;
++bool freezer_test_done;
+ static const struct platform_hibernation_ops *hibernation_ops;
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 23a2db1..0c4defe 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
+ #define SPARE_PAGES   ((1024 * 1024) >> PAGE_SHIFT)
+ /* kernel/power/hibernate.c */
++extern bool freezer_test_done;
++
+ extern int hibernation_snapshot(int platform_mode);
+ extern int hibernation_restore(int platform_mode);
+ extern int hibernation_platform_enter(void);
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 7cc3f5b..c202e2e 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -281,10 +281,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               }
+               pm_restore_gfp_mask();
+               error = hibernation_snapshot(data->platform_support);
+-              if (!error)
++              if (!error) {
+                       error = put_user(in_suspend, (int __user *)arg);
+-              if (!error)
+-                      data->ready = 1;
++                      if (!error && !freezer_test_done)
++                              data->ready = 1;
++                      if (freezer_test_done) {
++                              freezer_test_done = false;
++                              thaw_processes();
++                      }
++              }
+               break;
+       case SNAPSHOT_ATOMIC_RESTORE:
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0148-PM-Hibernate-Remove-deprecated-hibernation-test-mode.patch b/patches.runtime_pm/0148-PM-Hibernate-Remove-deprecated-hibernation-test-mode.patch
new file mode 100644 (file)
index 0000000..131c512
--- /dev/null
@@ -0,0 +1,138 @@
+From 5c1e41ce179c516e509d8c2b6cc6ba90bf8c4a57 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 1 Dec 2011 22:33:20 +0100
+Subject: PM / Hibernate: Remove deprecated hibernation test modes
+
+The hibernation test modes 'test' and 'testproc' are deprecated, because
+the 'pm_test' framework offers much more fine-grained control for debugging
+suspend and hibernation related problems.
+
+So, remove the deprecated 'test' and 'testproc' hibernation test modes.
+
+Suggested-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 48580ab8729865c81e148d59159fbe2aa7865511)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   33 ++++-----------------------------
+ 1 file changed, 4 insertions(+), 29 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 19500b5..111947e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
+ enum {
+       HIBERNATION_INVALID,
+       HIBERNATION_PLATFORM,
+-      HIBERNATION_TEST,
+-      HIBERNATION_TESTPROC,
+       HIBERNATION_SHUTDOWN,
+       HIBERNATION_REBOOT,
+       /* keep last */
+@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
+       mdelay(5000);
+ }
+-static int hibernation_testmode(int mode)
+-{
+-      if (hibernation_mode == mode) {
+-              hibernation_debug_sleep();
+-              return 1;
+-      }
+-      return 0;
+-}
+-
+ static int hibernation_test(int level)
+ {
+       if (pm_test_level == level) {
+@@ -114,7 +103,6 @@ static int hibernation_test(int level)
+       return 0;
+ }
+ #else /* !CONFIG_PM_DEBUG */
+-static int hibernation_testmode(int mode) { return 0; }
+ static int hibernation_test(int level) { return 0; }
+ #endif /* !CONFIG_PM_DEBUG */
+@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
+               goto Platform_finish;
+       error = disable_nonboot_cpus();
+-      if (error || hibernation_test(TEST_CPUS)
+-          || hibernation_testmode(HIBERNATION_TEST))
++      if (error || hibernation_test(TEST_CPUS))
+               goto Enable_cpus;
+       local_irq_disable();
+@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
+       if (error)
+               goto Cleanup;
+-      if (hibernation_test(TEST_FREEZER) ||
+-              hibernation_testmode(HIBERNATION_TESTPROC)) {
++      if (hibernation_test(TEST_FREEZER)) {
+               /*
+                * Indicate to the caller that we are returning due to a
+@@ -586,9 +572,6 @@ int hibernation_platform_enter(void)
+ static void power_down(void)
+ {
+       switch (hibernation_mode) {
+-      case HIBERNATION_TEST:
+-      case HIBERNATION_TESTPROC:
+-              break;
+       case HIBERNATION_REBOOT:
+               kernel_restart(NULL);
+               break;
+@@ -843,8 +826,6 @@ static const char * const hibernation_modes[] = {
+       [HIBERNATION_PLATFORM]  = "platform",
+       [HIBERNATION_SHUTDOWN]  = "shutdown",
+       [HIBERNATION_REBOOT]    = "reboot",
+-      [HIBERNATION_TEST]      = "test",
+-      [HIBERNATION_TESTPROC]  = "testproc",
+ };
+ /*
+@@ -853,17 +834,15 @@ static const char * const hibernation_modes[] = {
+  * Hibernation can be handled in several ways.  There are a few different ways
+  * to put the system into the sleep state: using the platform driver (e.g. ACPI
+  * or other hibernation_ops), powering it off or rebooting it (for testing
+- * mostly), or using one of the two available test modes.
++ * mostly).
+  *
+  * The sysfs file /sys/power/disk provides an interface for selecting the
+  * hibernation mode to use.  Reading from this file causes the available modes
+- * to be printed.  There are 5 modes that can be supported:
++ * to be printed.  There are 3 modes that can be supported:
+  *
+  *    'platform'
+  *    'shutdown'
+  *    'reboot'
+- *    'test'
+- *    'testproc'
+  *
+  * If a platform hibernation driver is in use, 'platform' will be supported
+  * and will be used by default.  Otherwise, 'shutdown' will be used by default.
+@@ -887,8 +866,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
+               switch (i) {
+               case HIBERNATION_SHUTDOWN:
+               case HIBERNATION_REBOOT:
+-              case HIBERNATION_TEST:
+-              case HIBERNATION_TESTPROC:
+                       break;
+               case HIBERNATION_PLATFORM:
+                       if (hibernation_ops)
+@@ -929,8 +906,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
+               switch (mode) {
+               case HIBERNATION_SHUTDOWN:
+               case HIBERNATION_REBOOT:
+-              case HIBERNATION_TEST:
+-              case HIBERNATION_TESTPROC:
+                       hibernation_mode = mode;
+                       break;
+               case HIBERNATION_PLATFORM:
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0149-PM-Sleep-Unify-diagnostic-messages-from-device-suspe.patch b/patches.runtime_pm/0149-PM-Sleep-Unify-diagnostic-messages-from-device-suspe.patch
new file mode 100644 (file)
index 0000000..e194260
--- /dev/null
@@ -0,0 +1,251 @@
+From 7bd8f160a2aa98a9818403722f52e46f7cd5220f Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 3 Dec 2011 00:23:43 +0100
+Subject: PM / Sleep: Unify diagnostic messages from device suspend/resume
+
+Make pm_op() and pm_noirq_op() use the same helper function for
+running callbacks, which will cause them to use the same format of
+diagnostic messages.  This also reduces the complexity and size of
+the code quite a bit.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
+(cherry picked from commit 0c6aebe31861c470c8cfbfdfdfd72d1369a6440b)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/main.c |  128 +++++++++++++--------------------------------
+ 1 file changed, 35 insertions(+), 93 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 406f82c..b570189 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -164,8 +164,9 @@ static ktime_t initcall_debug_start(struct device *dev)
+       ktime_t calltime = ktime_set(0, 0);
+       if (initcall_debug) {
+-              pr_info("calling  %s+ @ %i\n",
+-                              dev_name(dev), task_pid_nr(current));
++              pr_info("calling  %s+ @ %i, parent: %s\n",
++                      dev_name(dev), task_pid_nr(current),
++                      dev->parent ? dev_name(dev->parent) : "none");
+               calltime = ktime_get();
+       }
+@@ -210,6 +211,24 @@ static void dpm_wait_for_children(struct device *dev, bool async)
+        device_for_each_child(dev, &async, dpm_wait_fn);
+ }
++static int dpm_run_callback(struct device *dev, int (*cb)(struct device *))
++{
++      ktime_t calltime;
++      int error;
++
++      if (!cb)
++              return 0;
++
++      calltime = initcall_debug_start(dev);
++
++      error = cb(dev);
++      suspend_report_result(cb, error);
++
++      initcall_debug_report(dev, calltime, error);
++
++      return error;
++}
++
+ /**
+  * pm_op - Execute the PM operation appropriate for given PM event.
+  * @dev: Device to handle.
+@@ -221,59 +240,36 @@ static int pm_op(struct device *dev,
+                pm_message_t state)
+ {
+       int error = 0;
+-      ktime_t calltime;
+-
+-      calltime = initcall_debug_start(dev);
+       switch (state.event) {
+ #ifdef CONFIG_SUSPEND
+       case PM_EVENT_SUSPEND:
+-              if (ops->suspend) {
+-                      error = ops->suspend(dev);
+-                      suspend_report_result(ops->suspend, error);
+-              }
++              error = dpm_run_callback(dev, ops->suspend);
+               break;
+       case PM_EVENT_RESUME:
+-              if (ops->resume) {
+-                      error = ops->resume(dev);
+-                      suspend_report_result(ops->resume, error);
+-              }
++              error = dpm_run_callback(dev, ops->resume);
+               break;
+ #endif /* CONFIG_SUSPEND */
+ #ifdef CONFIG_HIBERNATE_CALLBACKS
+       case PM_EVENT_FREEZE:
+       case PM_EVENT_QUIESCE:
+-              if (ops->freeze) {
+-                      error = ops->freeze(dev);
+-                      suspend_report_result(ops->freeze, error);
+-              }
++              error = dpm_run_callback(dev, ops->freeze);
+               break;
+       case PM_EVENT_HIBERNATE:
+-              if (ops->poweroff) {
+-                      error = ops->poweroff(dev);
+-                      suspend_report_result(ops->poweroff, error);
+-              }
++              error = dpm_run_callback(dev, ops->poweroff);
+               break;
+       case PM_EVENT_THAW:
+       case PM_EVENT_RECOVER:
+-              if (ops->thaw) {
+-                      error = ops->thaw(dev);
+-                      suspend_report_result(ops->thaw, error);
+-              }
++              error = dpm_run_callback(dev, ops->thaw);
+               break;
+       case PM_EVENT_RESTORE:
+-              if (ops->restore) {
+-                      error = ops->restore(dev);
+-                      suspend_report_result(ops->restore, error);
+-              }
++              error = dpm_run_callback(dev, ops->restore);
+               break;
+ #endif /* CONFIG_HIBERNATE_CALLBACKS */
+       default:
+               error = -EINVAL;
+       }
+-      initcall_debug_report(dev, calltime, error);
+-
+       return error;
+ }
+@@ -291,70 +287,36 @@ static int pm_noirq_op(struct device *dev,
+                       pm_message_t state)
+ {
+       int error = 0;
+-      ktime_t calltime = ktime_set(0, 0), delta, rettime;
+-
+-      if (initcall_debug) {
+-              pr_info("calling  %s+ @ %i, parent: %s\n",
+-                              dev_name(dev), task_pid_nr(current),
+-                              dev->parent ? dev_name(dev->parent) : "none");
+-              calltime = ktime_get();
+-      }
+       switch (state.event) {
+ #ifdef CONFIG_SUSPEND
+       case PM_EVENT_SUSPEND:
+-              if (ops->suspend_noirq) {
+-                      error = ops->suspend_noirq(dev);
+-                      suspend_report_result(ops->suspend_noirq, error);
+-              }
++              error = dpm_run_callback(dev, ops->suspend_noirq);
+               break;
+       case PM_EVENT_RESUME:
+-              if (ops->resume_noirq) {
+-                      error = ops->resume_noirq(dev);
+-                      suspend_report_result(ops->resume_noirq, error);
+-              }
++              error = dpm_run_callback(dev, ops->resume_noirq);
+               break;
+ #endif /* CONFIG_SUSPEND */
+ #ifdef CONFIG_HIBERNATE_CALLBACKS
+       case PM_EVENT_FREEZE:
+       case PM_EVENT_QUIESCE:
+-              if (ops->freeze_noirq) {
+-                      error = ops->freeze_noirq(dev);
+-                      suspend_report_result(ops->freeze_noirq, error);
+-              }
++              error = dpm_run_callback(dev, ops->freeze_noirq);
+               break;
+       case PM_EVENT_HIBERNATE:
+-              if (ops->poweroff_noirq) {
+-                      error = ops->poweroff_noirq(dev);
+-                      suspend_report_result(ops->poweroff_noirq, error);
+-              }
++              error = dpm_run_callback(dev, ops->poweroff_noirq);
+               break;
+       case PM_EVENT_THAW:
+       case PM_EVENT_RECOVER:
+-              if (ops->thaw_noirq) {
+-                      error = ops->thaw_noirq(dev);
+-                      suspend_report_result(ops->thaw_noirq, error);
+-              }
++              error = dpm_run_callback(dev, ops->thaw_noirq);
+               break;
+       case PM_EVENT_RESTORE:
+-              if (ops->restore_noirq) {
+-                      error = ops->restore_noirq(dev);
+-                      suspend_report_result(ops->restore_noirq, error);
+-              }
++              error = dpm_run_callback(dev, ops->restore_noirq);
+               break;
+ #endif /* CONFIG_HIBERNATE_CALLBACKS */
+       default:
+               error = -EINVAL;
+       }
+-      if (initcall_debug) {
+-              rettime = ktime_get();
+-              delta = ktime_sub(rettime, calltime);
+-              printk("initcall %s_i+ returned %d after %Ld usecs\n",
+-                      dev_name(dev), error,
+-                      (unsigned long long)ktime_to_ns(delta) >> 10);
+-      }
+-
+       return error;
+ }
+@@ -486,26 +448,6 @@ void dpm_resume_noirq(pm_message_t state)
+ EXPORT_SYMBOL_GPL(dpm_resume_noirq);
+ /**
+- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+- * @dev: Device to resume.
+- * @cb: Resume callback to execute.
+- */
+-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+-{
+-      int error;
+-      ktime_t calltime;
+-
+-      calltime = initcall_debug_start(dev);
+-
+-      error = cb(dev);
+-      suspend_report_result(cb, error);
+-
+-      initcall_debug_report(dev, calltime, error);
+-
+-      return error;
+-}
+-
+-/**
+  * device_resume - Execute "resume" callbacks for given device.
+  * @dev: Device to handle.
+  * @state: PM transition of the system being carried out.
+@@ -553,7 +495,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+                       goto End;
+               } else if (dev->class->resume) {
+                       pm_dev_dbg(dev, state, "legacy class ");
+-                      error = legacy_resume(dev, dev->class->resume);
++                      error = dpm_run_callback(dev, dev->class->resume);
+                       goto End;
+               }
+       }
+@@ -564,7 +506,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+                       error = pm_op(dev, dev->bus->pm, state);
+               } else if (dev->bus->resume) {
+                       pm_dev_dbg(dev, state, "legacy ");
+-                      error = legacy_resume(dev, dev->bus->resume);
++                      error = dpm_run_callback(dev, dev->bus->resume);
+               }
+       }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0150-PM-Hibernate-Replace-unintuitive-if-condition-in-ker.patch b/patches.runtime_pm/0150-PM-Hibernate-Replace-unintuitive-if-condition-in-ker.patch
new file mode 100644 (file)
index 0000000..5c7fad0
--- /dev/null
@@ -0,0 +1,35 @@
+From 9f7c65e5f6da87311f7c565d534de0c2050b7342 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Sat, 3 Dec 2011 00:20:30 +0100
+Subject: PM / Hibernate: Replace unintuitive 'if' condition in
+ kernel/power/user.c with 'else'
+
+In the snapshot_ioctl() function, under SNAPSHOT_FREEZE, the code below
+freeze_processes() is a bit unintuitive. Improve it by replacing the
+second 'if' condition with an 'else' clause.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e5b16746f0f2d6883c226af52d90904ce0f7eee8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/user.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index c202e2e..06ea33d 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -259,7 +259,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               error = freeze_processes();
+               if (error)
+                       usermodehelper_enable();
+-              if (!error)
++              else
+                       data->frozen = 1;
+               break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0151-PM-Domains-Make-it-possible-to-assign-names-to-gener.patch b/patches.runtime_pm/0151-PM-Domains-Make-it-possible-to-assign-names-to-gener.patch
new file mode 100644 (file)
index 0000000..7b1305b
--- /dev/null
@@ -0,0 +1,73 @@
+From d3b904f78564a800dcc6f0ed4c1dc5dcfe937ea2 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 6 Dec 2011 22:19:54 +0100
+Subject: PM / Domains: Make it possible to assign names to generic PM domains
+
+Add a name member pointer to struct generic_pm_domain and use it in
+diagnostic messages regarding the domain power-off and power-on
+latencies.  Update the ARM shmobile SH7372 code to assign names to
+the PM domains used by it.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Magnus Damm <damm@opensource.se>
+(cherry picked from commit e84b2c202771bbd538866207efcb1f7dbab8045b)
+
+Conflicts:
+
+       arch/arm/mach-shmobile/pm-sh7372.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   14 ++++++++++++--
+ include/linux/pm_domain.h   |    1 +
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 5a8d67d..ad6ba2e 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -209,8 +209,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+                       goto err;
+               elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+-              if (elapsed_ns > genpd->power_on_latency_ns)
++              if (elapsed_ns > genpd->power_on_latency_ns) {
+                       genpd->power_on_latency_ns = elapsed_ns;
++                      if (genpd->name)
++                              pr_warning("%s: Power-on latency exceeded, "
++                                      "new value %lld ns\n", genpd->name,
++                                      elapsed_ns);
++              }
+       }
+       genpd_set_active(genpd);
+@@ -428,8 +433,13 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+               }
+               elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+-              if (elapsed_ns > genpd->power_off_latency_ns)
++              if (elapsed_ns > genpd->power_off_latency_ns) {
+                       genpd->power_off_latency_ns = elapsed_ns;
++                      if (genpd->name)
++                              pr_warning("%s: Power-off latency exceeded, "
++                                      "new value %lld ns\n", genpd->name,
++                                      elapsed_ns);
++              }
+       }
+       genpd->status = GPD_STATE_POWER_OFF;
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index fbb81bc..fb809b9 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -50,6 +50,7 @@ struct generic_pm_domain {
+       struct mutex lock;
+       struct dev_power_governor *gov;
+       struct work_struct power_off_work;
++      char *name;
+       unsigned int in_progress;       /* Number of devices being suspended now */
+       atomic_t sd_count;      /* Number of subdomains with power "on" */
+       enum gpd_status status; /* Current state of the domain */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0152-PM-Domains-Fix-default-system-suspend-resume-operati.patch b/patches.runtime_pm/0152-PM-Domains-Fix-default-system-suspend-resume-operati.patch
new file mode 100644 (file)
index 0000000..b63ee1d
--- /dev/null
@@ -0,0 +1,78 @@
+From 29bbccf78749e9b6f807fa5f442119838c06946a Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 6 Dec 2011 23:16:47 +0100
+Subject: PM / Domains: Fix default system suspend/resume operations
+
+Commit d23b9b00cdde5c93b914a172cecd57d5625fcd04 (PM / Domains: Rework
+system suspend callback routines (v2)) broke the system suspend and
+resume handling by devices belonging to generic PM domains, because
+it used freeze/thaw callbacks instead of suspend/resume ones and
+didn't initialize device callbacks for system suspend/resume
+properly at all.  Fix those problems.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c9914854b4ca339e511d052ce3a1a441ef15b928)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index ad6ba2e..92e6a90 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1435,7 +1435,7 @@ static int pm_genpd_default_restore_state(struct device *dev)
+  */
+ static int pm_genpd_default_suspend(struct device *dev)
+ {
+-      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+       return cb ? cb(dev) : pm_generic_suspend(dev);
+ }
+@@ -1446,7 +1446,7 @@ static int pm_genpd_default_suspend(struct device *dev)
+  */
+ static int pm_genpd_default_suspend_late(struct device *dev)
+ {
+-      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+       return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+ }
+@@ -1457,7 +1457,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
+  */
+ static int pm_genpd_default_resume_early(struct device *dev)
+ {
+-      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+       return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+ }
+@@ -1468,7 +1468,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
+  */
+ static int pm_genpd_default_resume(struct device *dev)
+ {
+-      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
++      int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+       return cb ? cb(dev) : pm_generic_resume(dev);
+ }
+@@ -1563,10 +1563,10 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->domain.ops.complete = pm_genpd_complete;
+       genpd->dev_ops.save_state = pm_genpd_default_save_state;
+       genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+-      genpd->dev_ops.freeze = pm_genpd_default_suspend;
+-      genpd->dev_ops.freeze_late = pm_genpd_default_suspend_late;
+-      genpd->dev_ops.thaw_early = pm_genpd_default_resume_early;
+-      genpd->dev_ops.thaw = pm_genpd_default_resume;
++      genpd->dev_ops.suspend = pm_genpd_default_suspend;
++      genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
++      genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
++      genpd->dev_ops.resume = pm_genpd_default_resume;
+       genpd->dev_ops.freeze = pm_genpd_default_freeze;
+       genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+       genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0153-PM-Sleep-Replace-mutex_-un-lock-pm_mutex-with-un-loc.patch b/patches.runtime_pm/0153-PM-Sleep-Replace-mutex_-un-lock-pm_mutex-with-un-loc.patch
new file mode 100644 (file)
index 0000000..d3cc68f
--- /dev/null
@@ -0,0 +1,234 @@
+From aadb0e1259b7652dc77405af543c4b5a12fd6510 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Wed, 7 Dec 2011 22:29:54 +0100
+Subject: PM / Sleep: Replace mutex_[un]lock(&pm_mutex) with
+ [un]lock_system_sleep()
+
+Using [un]lock_system_sleep() is safer than directly using mutex_[un]lock()
+on 'pm_mutex', since the latter could lead to freezing failures. Hence convert
+all the present users of mutex_[un]lock(&pm_mutex) to use these safe APIs
+instead.
+
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Reviewed-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit bcda53faf5814c0c6025a0bd47108adfcbe9f199)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/kexec.c           |    4 ++--
+ kernel/power/hibernate.c |   16 ++++++++--------
+ kernel/power/main.c      |    4 ++--
+ kernel/power/suspend.c   |    4 ++--
+ kernel/power/user.c      |   16 ++++++++--------
+ 5 files changed, 22 insertions(+), 22 deletions(-)
+
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 8d814cb..581b553 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -1506,7 +1506,7 @@ int kernel_kexec(void)
+ #ifdef CONFIG_KEXEC_JUMP
+       if (kexec_image->preserve_context) {
+-              mutex_lock(&pm_mutex);
++              lock_system_sleep();
+               pm_prepare_console();
+               error = freeze_processes();
+               if (error) {
+@@ -1559,7 +1559,7 @@ int kernel_kexec(void)
+               thaw_processes();
+  Restore_console:
+               pm_restore_console();
+-              mutex_unlock(&pm_mutex);
++              unlock_system_sleep();
+       }
+ #endif
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 111947e..7bef755 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -69,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
+               WARN_ON(1);
+               return;
+       }
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       hibernation_ops = ops;
+       if (ops)
+               hibernation_mode = HIBERNATION_PLATFORM;
+       else if (hibernation_mode == HIBERNATION_PLATFORM)
+               hibernation_mode = HIBERNATION_SHUTDOWN;
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+ }
+ static bool entering_platform_hibernation;
+@@ -597,7 +597,7 @@ int hibernate(void)
+ {
+       int error;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       /* The snapshot device should not be opened while we're running */
+       if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+               error = -EBUSY;
+@@ -666,7 +666,7 @@ int hibernate(void)
+       pm_restore_console();
+       atomic_inc(&snapshot_device_available);
+  Unlock:
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return error;
+ }
+@@ -894,7 +894,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
+       p = memchr(buf, '\n', n);
+       len = p ? p - buf : n;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
+               if (len == strlen(hibernation_modes[i])
+                   && !strncmp(buf, hibernation_modes[i], len)) {
+@@ -920,7 +920,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
+       if (!error)
+               pr_debug("PM: Hibernation mode set to '%s'\n",
+                        hibernation_modes[mode]);
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return error ? error : n;
+ }
+@@ -947,9 +947,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
+       if (maj != MAJOR(res) || min != MINOR(res))
+               goto out;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       swsusp_resume_device = res;
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       printk(KERN_INFO "PM: Starting manual resume from disk\n");
+       noresume = 0;
+       software_resume();
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 7d36fb3..9824b41e 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
+       p = memchr(buf, '\n', n);
+       len = p ? p - buf : n;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       level = TEST_FIRST;
+       for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
+@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
+                       break;
+               }
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return error ? error : n;
+ }
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index d336b27..4fd51be 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
+  */
+ void suspend_set_ops(const struct platform_suspend_ops *ops)
+ {
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       suspend_ops = ops;
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+ }
+ EXPORT_SYMBOL_GPL(suspend_set_ops);
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 06ea33d..98ade21 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -71,7 +71,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
+       struct snapshot_data *data;
+       int error;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+               error = -EBUSY;
+@@ -123,7 +123,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
+       data->platform_support = 0;
+  Unlock:
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return error;
+ }
+@@ -132,7 +132,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
+ {
+       struct snapshot_data *data;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       swsusp_free();
+       free_basic_memory_bitmaps();
+@@ -146,7 +146,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
+                       PM_POST_HIBERNATION : PM_POST_RESTORE);
+       atomic_inc(&snapshot_device_available);
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return 0;
+ }
+@@ -158,7 +158,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
+       ssize_t res;
+       loff_t pg_offp = *offp & ~PAGE_MASK;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       data = filp->private_data;
+       if (!data->ready) {
+@@ -179,7 +179,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
+               *offp += res;
+  Unlock:
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return res;
+ }
+@@ -191,7 +191,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
+       ssize_t res;
+       loff_t pg_offp = *offp & ~PAGE_MASK;
+-      mutex_lock(&pm_mutex);
++      lock_system_sleep();
+       data = filp->private_data;
+@@ -208,7 +208,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
+       if (res > 0)
+               *offp += res;
+ unlock:
+-      mutex_unlock(&pm_mutex);
++      unlock_system_sleep();
+       return res;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0154-PM-Sleep-Recommend-un-lock_system_sleep-over-using-p.patch b/patches.runtime_pm/0154-PM-Sleep-Recommend-un-lock_system_sleep-over-using-p.patch
new file mode 100644 (file)
index 0000000..529c744
--- /dev/null
@@ -0,0 +1,55 @@
+From 48b23f3e7721987e92b83b9b3be3a3c9746ea8db Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Wed, 7 Dec 2011 22:30:09 +0100
+Subject: PM / Sleep: Recommend [un]lock_system_sleep() over using pm_mutex
+ directly
+
+Update the documentation to explain the perils of directly using
+mutex_[un]lock(&pm_mutex) and recommend the usage of the safe
+APIs [un]lock_system_sleep() instead.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit cba3176e88fa134ece3ae1cf7e35dab9972d7853)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/freezing-of-tasks.txt |   25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
+index 3ab9fbd..6ccb68f 100644
+--- a/Documentation/power/freezing-of-tasks.txt
++++ b/Documentation/power/freezing-of-tasks.txt
+@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
+ A driver must have all firmwares it may need in RAM before suspend() is called.
+ If keeping them is not practical, for example due to their size, they must be
+ requested early enough using the suspend notifier API described in notifiers.txt.
++
++VI. Are there any precautions to be taken to prevent freezing failures?
++
++Yes, there are.
++
++First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
++from system-wide sleep such as suspend/hibernation is not encouraged.
++If possible, that piece of code must instead hook onto the suspend/hibernation
++notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
++(kernel/cpu.c) for an example.
++
++However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
++it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
++that could lead to freezing failures, because if the suspend/hibernate code
++successfully acquired the 'pm_mutex' lock, and hence that other entity failed
++to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
++state. As a consequence, the freezer would not be able to freeze that task,
++leading to freezing failure.
++
++However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
++since they ask the freezer to skip freezing this task, since it is anyway
++"frozen enough" as it is blocked on 'pm_mutex', which will be released
++only after the entire suspend/hibernation sequence is complete.
++So, to summarize, use [un]lock_system_sleep() instead of directly using
++mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0155-PM-Domains-Provide-an-always-on-power-domain-governo.patch b/patches.runtime_pm/0155-PM-Domains-Provide-an-always-on-power-domain-governo.patch
new file mode 100644 (file)
index 0000000..b5cf444
--- /dev/null
@@ -0,0 +1,66 @@
+From 4fe58a1e5cb487f882266b410d34b2c5041eea7c Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Date: Thu, 8 Dec 2011 23:27:28 +0100
+Subject: PM / Domains: Provide an always on power domain governor
+
+Since systems are likely to have power domains that can't be turned off
+for various reasons at least temporarily while implementing power domain
+support provide a default governor which will always refuse to power off
+the domain, saving platforms having to implement their own.
+
+Since the code is so tiny don't bother with a Kconfig symbol for it.
+
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 925b44a273aa8c4c23c006c1228aacd538eead09)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain_governor.c |   13 +++++++++++++
+ include/linux/pm_domain.h            |    2 ++
+ 2 files changed, 15 insertions(+)
+
+diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
+index da78540..51527ee 100644
+--- a/drivers/base/power/domain_governor.c
++++ b/drivers/base/power/domain_governor.c
+@@ -141,3 +141,16 @@ struct dev_power_governor simple_qos_governor = {
+       .stop_ok = default_stop_ok,
+       .power_down_ok = default_power_down_ok,
+ };
++
++static bool always_on_power_down_ok(struct dev_pm_domain *domain)
++{
++      return false;
++}
++
++/**
++ * pm_genpd_gov_always_on - A governor implementing an always-on policy
++ */
++struct dev_power_governor pm_domain_always_on_gov = {
++      .power_down_ok = always_on_power_down_ok,
++      .stop_ok = default_stop_ok,
++};
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index fb809b9..a03a0ad 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -140,6 +140,7 @@ extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+ extern bool default_stop_ok(struct device *dev);
++extern struct dev_power_governor pm_domain_always_on_gov;
+ #else
+ static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+@@ -193,6 +194,7 @@ static inline bool default_stop_ok(struct device *dev)
+ {
+       return false;
+ }
++#define pm_domain_always_on_gov NULL
+ #endif
+ static inline int pm_genpd_remove_callbacks(struct device *dev)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0156-PM-Hibernate-Remove-deprecated-hibernation-snapshot-.patch b/patches.runtime_pm/0156-PM-Hibernate-Remove-deprecated-hibernation-snapshot-.patch
new file mode 100644 (file)
index 0000000..29ddbe3
--- /dev/null
@@ -0,0 +1,190 @@
+From ee1ef28d12bfe55da8da63631e8ea1b35c345977 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 8 Dec 2011 23:42:53 +0100
+Subject: PM / Hibernate: Remove deprecated hibernation snapshot ioctls
+
+Several snapshot ioctls were marked for removal quite some time ago,
+since they were deprecated. Remove them.
+
+Suggested-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit cf007e3526a785a95a738d5a8fba44f1f4fe33e0)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/feature-removal-schedule.txt |   11 ----
+ kernel/power/user.c                        |   87 ----------------------------
+ 2 files changed, 98 deletions(-)
+
+diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
+index b1c921c..7bb7798 100644
+--- a/Documentation/feature-removal-schedule.txt
++++ b/Documentation/feature-removal-schedule.txt
+@@ -85,17 +85,6 @@ Who:        Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
+ ---------------------------
+-What: Deprecated snapshot ioctls
+-When: 2.6.36
+-
+-Why:  The ioctls in kernel/power/user.c were marked as deprecated long time
+-      ago. Now they notify users about that so that they need to replace
+-      their userspace. After some more time, remove them completely.
+-
+-Who:  Jiri Slaby <jirislaby@gmail.com>
+-
+----------------------------
+-
+ What: The ieee80211_regdom module parameter
+ When: March 2010 / desktop catchup
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 98ade21..78bdb44 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -30,28 +30,6 @@
+ #include "power.h"
+-/*
+- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
+- * will be removed in the future.  They are only preserved here for
+- * compatibility with existing userland utilities.
+- */
+-#define SNAPSHOT_SET_SWAP_FILE        _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
+-#define SNAPSHOT_PMOPS                _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+-
+-#define PMOPS_PREPARE 1
+-#define PMOPS_ENTER   2
+-#define PMOPS_FINISH  3
+-
+-/*
+- * NOTE: The following ioctl definitions are wrong and have been replaced with
+- * correct ones.  They are only preserved here for compatibility with existing
+- * userland utilities and will be removed in the future.
+- */
+-#define SNAPSHOT_ATOMIC_SNAPSHOT      _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
+-#define SNAPSHOT_SET_IMAGE_SIZE               _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
+-#define SNAPSHOT_AVAIL_SWAP           _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
+-#define SNAPSHOT_GET_SWAP_PAGE                _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
+-
+ #define SNAPSHOT_MINOR        231
+@@ -213,15 +191,6 @@ unlock:
+       return res;
+ }
+-static void snapshot_deprecated_ioctl(unsigned int cmd)
+-{
+-      if (printk_ratelimit())
+-              printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
+-                              "be removed soon, update your suspend-to-disk "
+-                              "utilities\n",
+-                              __builtin_return_address(0), cmd);
+-}
+-
+ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+                                                       unsigned long arg)
+ {
+@@ -272,8 +241,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               data->frozen = 0;
+               break;
+-      case SNAPSHOT_ATOMIC_SNAPSHOT:
+-              snapshot_deprecated_ioctl(cmd);
+       case SNAPSHOT_CREATE_IMAGE:
+               if (data->mode != O_RDONLY || !data->frozen  || data->ready) {
+                       error = -EPERM;
+@@ -308,8 +275,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               data->ready = 0;
+               break;
+-      case SNAPSHOT_SET_IMAGE_SIZE:
+-              snapshot_deprecated_ioctl(cmd);
+       case SNAPSHOT_PREF_IMAGE_SIZE:
+               image_size = arg;
+               break;
+@@ -324,16 +289,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               error = put_user(size, (loff_t __user *)arg);
+               break;
+-      case SNAPSHOT_AVAIL_SWAP:
+-              snapshot_deprecated_ioctl(cmd);
+       case SNAPSHOT_AVAIL_SWAP_SIZE:
+               size = count_swap_pages(data->swap, 1);
+               size <<= PAGE_SHIFT;
+               error = put_user(size, (loff_t __user *)arg);
+               break;
+-      case SNAPSHOT_GET_SWAP_PAGE:
+-              snapshot_deprecated_ioctl(cmd);
+       case SNAPSHOT_ALLOC_SWAP_PAGE:
+               if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
+                       error = -ENODEV;
+@@ -356,27 +317,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               free_all_swap_pages(data->swap);
+               break;
+-      case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
+-              snapshot_deprecated_ioctl(cmd);
+-              if (!swsusp_swap_in_use()) {
+-                      /*
+-                       * User space encodes device types as two-byte values,
+-                       * so we need to recode them
+-                       */
+-                      if (old_decode_dev(arg)) {
+-                              data->swap = swap_type_of(old_decode_dev(arg),
+-                                                      0, NULL);
+-                              if (data->swap < 0)
+-                                      error = -ENODEV;
+-                      } else {
+-                              data->swap = -1;
+-                              error = -EINVAL;
+-                      }
+-              } else {
+-                      error = -EPERM;
+-              }
+-              break;
+-
+       case SNAPSHOT_S2RAM:
+               if (!data->frozen) {
+                       error = -EPERM;
+@@ -399,33 +339,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+                       error = hibernation_platform_enter();
+               break;
+-      case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
+-              snapshot_deprecated_ioctl(cmd);
+-              error = -EINVAL;
+-
+-              switch (arg) {
+-
+-              case PMOPS_PREPARE:
+-                      data->platform_support = 1;
+-                      error = 0;
+-                      break;
+-
+-              case PMOPS_ENTER:
+-                      if (data->platform_support)
+-                              error = hibernation_platform_enter();
+-                      break;
+-
+-              case PMOPS_FINISH:
+-                      if (data->platform_support)
+-                              error = 0;
+-                      break;
+-
+-              default:
+-                      printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
+-
+-              }
+-              break;
+-
+       case SNAPSHOT_SET_SWAP_AREA:
+               if (swsusp_swap_in_use()) {
+                       error = -EPERM;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0157-PM-Sleep-Simplify-generic-system-suspend-callbacks.patch b/patches.runtime_pm/0157-PM-Sleep-Simplify-generic-system-suspend-callbacks.patch
new file mode 100644 (file)
index 0000000..376d72f
--- /dev/null
@@ -0,0 +1,97 @@
+From 40e96ab9923917457fdee1fb41be5a62248e4358 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 15 Dec 2011 20:59:23 +0100
+Subject: PM / Sleep: Simplify generic system suspend callbacks
+
+The pm_runtime_suspended() check in __pm_generic_call() doesn't
+really help and may cause problems to happen, because in some cases
+the system suspend callbacks need to be called even if the given
+device has been suspended by runtime PM.  For example, if the device
+generally supports remote wakeup and is not enabled to wake up
+the system from sleep, it should be prevented from generating wakeup
+signals during system suspend and that has to be done by the
+suspend callbacks that the pm_runtime_suspended() check prevents from
+being executed.
+
+Similarly, it may not be a good idea to unconditionally change
+the runtime PM status of the device to 'active' in
+__pm_generic_resume(), because the driver may want to leave the
+device in the 'suspended' state, depending on what happened to it
+before the system suspend and whether or not it is enabled to
+wake up the system.
+
+For the above reasons, remove the pm_runtime_suspended()
+check from __pm_generic_call() and remove the code changing the
+device's runtime PM status from __pm_generic_resume().
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 8ca6d9bcc8d33c592c0855b4b1481bc723ac7e85)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/generic_ops.c |   24 ++++++------------------
+ 1 file changed, 6 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
+index 265a0ee..1b878b955 100644
+--- a/drivers/base/power/generic_ops.c
++++ b/drivers/base/power/generic_ops.c
+@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
+  * @event: PM transition of the system under way.
+  * @bool: Whether or not this is the "noirq" stage.
+  *
+- * If the device has not been suspended at run time, execute the
+- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
+- * return its error code.  Otherwise, return zero.
++ * Execute the suspend/freeze/poweroff/thaw callback provided by the driver of
++ * @dev, if defined, and return its error code.    Return 0 if the callback is
++ * not present.
+  */
+ static int __pm_generic_call(struct device *dev, int event, bool noirq)
+ {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*callback)(struct device *);
+-      if (!pm || pm_runtime_suspended(dev))
++      if (!pm)
+               return 0;
+       switch (event) {
+@@ -217,14 +217,12 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
+  * @bool: Whether or not this is the "noirq" stage.
+  *
+  * Execute the resume/resotre callback provided by the @dev's driver, if
+- * defined.  If it returns 0, change the device's runtime PM status to 'active'.
+- * Return the callback's error code.
++ * defined, and return its error code.  Return 0 if the callback is not present.
+  */
+ static int __pm_generic_resume(struct device *dev, int event, bool noirq)
+ {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*callback)(struct device *);
+-      int ret;
+       if (!pm)
+               return 0;
+@@ -241,17 +239,7 @@ static int __pm_generic_resume(struct device *dev, int event, bool noirq)
+               break;
+       }
+-      if (!callback)
+-              return 0;
+-
+-      ret = callback(dev);
+-      if (!ret && !noirq && pm_runtime_enabled(dev)) {
+-              pm_runtime_disable(dev);
+-              pm_runtime_set_active(dev);
+-              pm_runtime_enable(dev);
+-      }
+-
+-      return ret;
++      return callback ? callback(dev) : 0;
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0158-PM-Sleep-Merge-internal-functions-in-generic_ops.c.patch b/patches.runtime_pm/0158-PM-Sleep-Merge-internal-functions-in-generic_ops.c.patch
new file mode 100644 (file)
index 0000000..ccb9d80
--- /dev/null
@@ -0,0 +1,122 @@
+From e34db5ebf8c9e55fddb85498247ea9ceedd7c744 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Thu, 15 Dec 2011 20:59:30 +0100
+Subject: PM / Sleep: Merge internal functions in generic_ops.c
+
+After the change that removed the code related to runtime PM
+from __pm_generic_call() and __pm_generic_resume() these two
+functions need not be separate any more, so merge them.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 1eac8111e0763853266a171ce11214da3a347a0a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/generic_ops.c |   48 +++++++++-----------------------------
+ 1 file changed, 11 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
+index 1b878b955..5a5b154 100644
+--- a/drivers/base/power/generic_ops.c
++++ b/drivers/base/power/generic_ops.c
+@@ -97,7 +97,7 @@ int pm_generic_prepare(struct device *dev)
+  * @event: PM transition of the system under way.
+  * @bool: Whether or not this is the "noirq" stage.
+  *
+- * Execute the suspend/freeze/poweroff/thaw callback provided by the driver of
++ * Execute the PM callback corresponding to @event provided by the driver of
+  * @dev, if defined, and return its error code.    Return 0 if the callback is
+  * not present.
+  */
+@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
+       case PM_EVENT_HIBERNATE:
+               callback = noirq ? pm->poweroff_noirq : pm->poweroff;
+               break;
++      case PM_EVENT_RESUME:
++              callback = noirq ? pm->resume_noirq : pm->resume;
++              break;
+       case PM_EVENT_THAW:
+               callback = noirq ? pm->thaw_noirq : pm->thaw;
+               break;
++      case PM_EVENT_RESTORE:
++              callback = noirq ? pm->restore_noirq : pm->restore;
++              break;
+       default:
+               callback = NULL;
+               break;
+@@ -211,44 +217,12 @@ int pm_generic_thaw(struct device *dev)
+ EXPORT_SYMBOL_GPL(pm_generic_thaw);
+ /**
+- * __pm_generic_resume - Generic resume/restore callback for subsystems.
+- * @dev: Device to handle.
+- * @event: PM transition of the system under way.
+- * @bool: Whether or not this is the "noirq" stage.
+- *
+- * Execute the resume/resotre callback provided by the @dev's driver, if
+- * defined, and return its error code.  Return 0 if the callback is not present.
+- */
+-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
+-{
+-      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+-      int (*callback)(struct device *);
+-
+-      if (!pm)
+-              return 0;
+-
+-      switch (event) {
+-      case PM_EVENT_RESUME:
+-              callback = noirq ? pm->resume_noirq : pm->resume;
+-              break;
+-      case PM_EVENT_RESTORE:
+-              callback = noirq ? pm->restore_noirq : pm->restore;
+-              break;
+-      default:
+-              callback = NULL;
+-              break;
+-      }
+-
+-      return callback ? callback(dev) : 0;
+-}
+-
+-/**
+  * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+  * @dev: Device to resume.
+  */
+ int pm_generic_resume_noirq(struct device *dev)
+ {
+-      return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
++      return __pm_generic_call(dev, PM_EVENT_RESUME, true);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+@@ -258,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+  */
+ int pm_generic_resume(struct device *dev)
+ {
+-      return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
++      return __pm_generic_call(dev, PM_EVENT_RESUME, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_resume);
+@@ -268,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
+  */
+ int pm_generic_restore_noirq(struct device *dev)
+ {
+-      return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
++      return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+@@ -278,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+  */
+ int pm_generic_restore(struct device *dev)
+ {
+-      return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
++      return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_restore);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0159-PM-Sleep-Make-pm_op-and-pm_noirq_op-return-callback-.patch b/patches.runtime_pm/0159-PM-Sleep-Make-pm_op-and-pm_noirq_op-return-callback-.patch
new file mode 100644 (file)
index 0000000..b792223
--- /dev/null
@@ -0,0 +1,394 @@
+From e67ec224d27ba800a297dfc34310a2a873581b2e Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 18 Dec 2011 00:34:01 +0100
+Subject: PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
+
+Make the pm_op() and pm_noirq_op() functions return pointers to
+appropriate callbacks instead of executing those callbacks and
+returning their results.
+
+This change is required for a subsequent modification that will
+execute the corresponding driver callback if the subsystem
+callback returned by either pm_op(), or pm_noirq_op() is NULL.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 9cf519d1c15fa05a538c2b3963c5f3903daf765a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/main.c |  197 ++++++++++++++++++++++-----------------------
+ 1 file changed, 95 insertions(+), 102 deletions(-)
+
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index b570189..b5cef7e 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -32,6 +32,8 @@
+ #include "../base.h"
+ #include "power.h"
++typedef int (*pm_callback_t)(struct device *);
++
+ /*
+  * The entries in the dpm_list list are in a depth first order, simply
+  * because children are guaranteed to be discovered after parents, and
+@@ -211,113 +213,70 @@ static void dpm_wait_for_children(struct device *dev, bool async)
+        device_for_each_child(dev, &async, dpm_wait_fn);
+ }
+-static int dpm_run_callback(struct device *dev, int (*cb)(struct device *))
+-{
+-      ktime_t calltime;
+-      int error;
+-
+-      if (!cb)
+-              return 0;
+-
+-      calltime = initcall_debug_start(dev);
+-
+-      error = cb(dev);
+-      suspend_report_result(cb, error);
+-
+-      initcall_debug_report(dev, calltime, error);
+-
+-      return error;
+-}
+-
+ /**
+- * pm_op - Execute the PM operation appropriate for given PM event.
+- * @dev: Device to handle.
++ * pm_op - Return the PM operation appropriate for given PM event.
+  * @ops: PM operations to choose from.
+  * @state: PM transition of the system being carried out.
+  */
+-static int pm_op(struct device *dev,
+-               const struct dev_pm_ops *ops,
+-               pm_message_t state)
++static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
+ {
+-      int error = 0;
+-
+       switch (state.event) {
+ #ifdef CONFIG_SUSPEND
+       case PM_EVENT_SUSPEND:
+-              error = dpm_run_callback(dev, ops->suspend);
+-              break;
++              return ops->suspend;
+       case PM_EVENT_RESUME:
+-              error = dpm_run_callback(dev, ops->resume);
+-              break;
++              return ops->resume;
+ #endif /* CONFIG_SUSPEND */
+ #ifdef CONFIG_HIBERNATE_CALLBACKS
+       case PM_EVENT_FREEZE:
+       case PM_EVENT_QUIESCE:
+-              error = dpm_run_callback(dev, ops->freeze);
+-              break;
++              return ops->freeze;
+       case PM_EVENT_HIBERNATE:
+-              error = dpm_run_callback(dev, ops->poweroff);
+-              break;
++              return ops->poweroff;
+       case PM_EVENT_THAW:
+       case PM_EVENT_RECOVER:
+-              error = dpm_run_callback(dev, ops->thaw);
++              return ops->thaw;
+               break;
+       case PM_EVENT_RESTORE:
+-              error = dpm_run_callback(dev, ops->restore);
+-              break;
++              return ops->restore;
+ #endif /* CONFIG_HIBERNATE_CALLBACKS */
+-      default:
+-              error = -EINVAL;
+       }
+-      return error;
++      return NULL;
+ }
+ /**
+- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
+- * @dev: Device to handle.
++ * pm_noirq_op - Return the PM operation appropriate for given PM event.
+  * @ops: PM operations to choose from.
+  * @state: PM transition of the system being carried out.
+  *
+  * The driver of @dev will not receive interrupts while this function is being
+  * executed.
+  */
+-static int pm_noirq_op(struct device *dev,
+-                      const struct dev_pm_ops *ops,
+-                      pm_message_t state)
++static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
+ {
+-      int error = 0;
+-
+       switch (state.event) {
+ #ifdef CONFIG_SUSPEND
+       case PM_EVENT_SUSPEND:
+-              error = dpm_run_callback(dev, ops->suspend_noirq);
+-              break;
++              return ops->suspend_noirq;
+       case PM_EVENT_RESUME:
+-              error = dpm_run_callback(dev, ops->resume_noirq);
+-              break;
++              return ops->resume_noirq;
+ #endif /* CONFIG_SUSPEND */
+ #ifdef CONFIG_HIBERNATE_CALLBACKS
+       case PM_EVENT_FREEZE:
+       case PM_EVENT_QUIESCE:
+-              error = dpm_run_callback(dev, ops->freeze_noirq);
+-              break;
++              return ops->freeze_noirq;
+       case PM_EVENT_HIBERNATE:
+-              error = dpm_run_callback(dev, ops->poweroff_noirq);
+-              break;
++              return ops->poweroff_noirq;
+       case PM_EVENT_THAW:
+       case PM_EVENT_RECOVER:
+-              error = dpm_run_callback(dev, ops->thaw_noirq);
+-              break;
++              return ops->thaw_noirq;
+       case PM_EVENT_RESTORE:
+-              error = dpm_run_callback(dev, ops->restore_noirq);
+-              break;
++              return ops->restore_noirq;
+ #endif /* CONFIG_HIBERNATE_CALLBACKS */
+-      default:
+-              error = -EINVAL;
+       }
+-      return error;
++      return NULL;
+ }
+ static char *pm_verb(int event)
+@@ -375,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+               usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+ }
++static int dpm_run_callback(pm_callback_t cb, struct device *dev,
++                          pm_message_t state, char *info)
++{
++      ktime_t calltime;
++      int error;
++
++      if (!cb)
++              return 0;
++
++      calltime = initcall_debug_start(dev);
++
++      pm_dev_dbg(dev, state, info);
++      error = cb(dev);
++      suspend_report_result(cb, error);
++
++      initcall_debug_report(dev, calltime, error);
++
++      return error;
++}
++
+ /*------------------------- Resume routines -------------------------*/
+ /**
+@@ -387,25 +366,29 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+  */
+ static int device_resume_noirq(struct device *dev, pm_message_t state)
+ {
++      pm_callback_t callback = NULL;
++      char *info = NULL;
+       int error = 0;
+       TRACE_DEVICE(dev);
+       TRACE_RESUME(0);
+       if (dev->pm_domain) {
+-              pm_dev_dbg(dev, state, "EARLY power domain ");
+-              error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
++              info = "EARLY power domain ";
++              callback = pm_noirq_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+-              pm_dev_dbg(dev, state, "EARLY type ");
+-              error = pm_noirq_op(dev, dev->type->pm, state);
++              info = "EARLY type ";
++              callback = pm_noirq_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+-              pm_dev_dbg(dev, state, "EARLY class ");
+-              error = pm_noirq_op(dev, dev->class->pm, state);
++              info = "EARLY class ";
++              callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+-              pm_dev_dbg(dev, state, "EARLY ");
+-              error = pm_noirq_op(dev, dev->bus->pm, state);
++              info = "EARLY ";
++              callback = pm_noirq_op(dev->bus->pm, state);
+       }
++      error = dpm_run_callback(callback, dev, state, info);
++
+       TRACE_RESUME(error);
+       return error;
+ }
+@@ -455,6 +438,8 @@ EXPORT_SYMBOL_GPL(dpm_resume_noirq);
+  */
+ static int device_resume(struct device *dev, pm_message_t state, bool async)
+ {
++      pm_callback_t callback = NULL;
++      char *info = NULL;
+       int error = 0;
+       bool put = false;
+@@ -477,40 +462,41 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+       put = true;
+       if (dev->pm_domain) {
+-              pm_dev_dbg(dev, state, "power domain ");
+-              error = pm_op(dev, &dev->pm_domain->ops, state);
++              info = "power domain ";
++              callback = pm_op(&dev->pm_domain->ops, state);
+               goto End;
+       }
+       if (dev->type && dev->type->pm) {
+-              pm_dev_dbg(dev, state, "type ");
+-              error = pm_op(dev, dev->type->pm, state);
++              info = "type ";
++              callback = pm_op(dev->type->pm, state);
+               goto End;
+       }
+       if (dev->class) {
+               if (dev->class->pm) {
+-                      pm_dev_dbg(dev, state, "class ");
+-                      error = pm_op(dev, dev->class->pm, state);
++                      info = "class ";
++                      callback = pm_op(dev->class->pm, state);
+                       goto End;
+               } else if (dev->class->resume) {
+-                      pm_dev_dbg(dev, state, "legacy class ");
+-                      error = dpm_run_callback(dev, dev->class->resume);
++                      info = "legacy class ";
++                      callback = dev->class->resume;
+                       goto End;
+               }
+       }
+       if (dev->bus) {
+               if (dev->bus->pm) {
+-                      pm_dev_dbg(dev, state, "");
+-                      error = pm_op(dev, dev->bus->pm, state);
++                      info = "";
++                      callback = pm_op(dev->bus->pm, state);
+               } else if (dev->bus->resume) {
+-                      pm_dev_dbg(dev, state, "legacy ");
+-                      error = dpm_run_callback(dev, dev->bus->resume);
++                      info = "legacy ";
++                      callback = dev->bus->resume;
+               }
+       }
+  End:
++      error = dpm_run_callback(callback, dev, state, info);
+       dev->power.is_suspended = false;
+  Unlock:
+@@ -705,23 +691,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
+  */
+ static int device_suspend_noirq(struct device *dev, pm_message_t state)
+ {
+-      int error = 0;
++      pm_callback_t callback = NULL;
++      char *info = NULL;
+       if (dev->pm_domain) {
+-              pm_dev_dbg(dev, state, "LATE power domain ");
+-              error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
++              info = "LATE power domain ";
++              callback = pm_noirq_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+-              pm_dev_dbg(dev, state, "LATE type ");
+-              error = pm_noirq_op(dev, dev->type->pm, state);
++              info = "LATE type ";
++              callback = pm_noirq_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+-              pm_dev_dbg(dev, state, "LATE class ");
+-              error = pm_noirq_op(dev, dev->class->pm, state);
++              info = "LATE class ";
++              callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+-              pm_dev_dbg(dev, state, "LATE ");
+-              error = pm_noirq_op(dev, dev->bus->pm, state);
++              info = "LATE ";
++              callback = pm_noirq_op(dev->bus->pm, state);
+       }
+-      return error;
++      return dpm_run_callback(callback, dev, state, info);
+ }
+ /**
+@@ -798,6 +785,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
+  */
+ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ {
++      pm_callback_t callback = NULL;
++      char *info = NULL;
+       int error = 0;
+       dpm_wait_for_children(dev, async);
+@@ -818,22 +807,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+       device_lock(dev);
+       if (dev->pm_domain) {
+-              pm_dev_dbg(dev, state, "power domain ");
+-              error = pm_op(dev, &dev->pm_domain->ops, state);
+-              goto End;
++              info = "power domain ";
++              callback = pm_op(&dev->pm_domain->ops, state);
++              goto Run;
+       }
+       if (dev->type && dev->type->pm) {
+-              pm_dev_dbg(dev, state, "type ");
+-              error = pm_op(dev, dev->type->pm, state);
+-              goto End;
++              info = "type ";
++              callback = pm_op(dev->type->pm, state);
++              goto Run;
+       }
+       if (dev->class) {
+               if (dev->class->pm) {
+-                      pm_dev_dbg(dev, state, "class ");
+-                      error = pm_op(dev, dev->class->pm, state);
+-                      goto End;
++                      info = "class ";
++                      callback = pm_op(dev->class->pm, state);
++                      goto Run;
+               } else if (dev->class->suspend) {
+                       pm_dev_dbg(dev, state, "legacy class ");
+                       error = legacy_suspend(dev, state, dev->class->suspend);
+@@ -843,14 +832,18 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+       if (dev->bus) {
+               if (dev->bus->pm) {
+-                      pm_dev_dbg(dev, state, "");
+-                      error = pm_op(dev, dev->bus->pm, state);
++                      info = "";
++                      callback = pm_op(dev->bus->pm, state);
+               } else if (dev->bus->suspend) {
+                       pm_dev_dbg(dev, state, "legacy ");
+                       error = legacy_suspend(dev, state, dev->bus->suspend);
++                      goto End;
+               }
+       }
++ Run:
++      error = dpm_run_callback(callback, dev, state, info);
++
+  End:
+       if (!error) {
+               dev->power.is_suspended = true;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0160-PM-Run-the-driver-callback-directly-if-the-subsystem.patch b/patches.runtime_pm/0160-PM-Run-the-driver-callback-directly-if-the-subsystem.patch
new file mode 100644 (file)
index 0000000..af0ba79
--- /dev/null
@@ -0,0 +1,512 @@
+From 489af6027ddbe97a7922473881a74a3e2b7e59bf Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 18 Dec 2011 00:34:13 +0100
+Subject: PM: Run the driver callback directly if the subsystem one is not
+ there
+
+Make the PM core execute driver PM callbacks directly if the
+corresponding subsystem callbacks are not present.
+
+There are three reasons for doing that.  First, it reflects the
+behavior of drivers/base/dd.c:really_probe() that runs the driver's
+.probe() callback directly if the bus type's one is not defined, so
+this change will remove one arbitrary difference between the PM core
+and the remaining parts of the driver core.  Second, it will allow
+some subsystems, whose PM callbacks don't do anything except for
+executing driver callbacks, to be simplified quite a bit by removing
+those "forward-only" callbacks.  Finally, it will allow us to remove
+one level of indirection in the system suspend and resume code paths
+where it is not necessary, which is going to lead to less debug noise
+with initcall_debug passed in the kernel command line (messages won't
+be printed for driverless devices whose subsystems don't provide
+PM callbacks among other things).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 35cd133c6130c1eb52806808abee9d62e6854a27)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt    |   37 +++++-----
+ Documentation/power/runtime_pm.txt |  130 +++++++++++++++++++-----------------
+ drivers/base/power/main.c          |  109 ++++++++++++++++++++----------
+ drivers/base/power/runtime.c       |    9 +++
+ 4 files changed, 170 insertions(+), 115 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 3139fb5..20af7de 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops
+ pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+ struct bus_type, struct device_type and struct class.  They are mostly of
+ interest to the people writing infrastructure for platforms and buses, like PCI
+-or USB, or device type and device class drivers.
++or USB, or device type and device class drivers.  They also are relevant to the
++writers of device drivers whose subsystems (PM domains, device types, device
++classes and bus types) don't provide all power management methods.
+ Bus drivers implement these methods as appropriate for the hardware and the
+ drivers using it; PCI works differently from USB, and so on.  Not many people
+@@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are
+ unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
+ been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+-All phases use PM domain, bus, type, or class callbacks (that is, methods
+-defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+-These callbacks are regarded by the PM core as mutually exclusive.  Moreover,
+-PM domain callbacks always take precedence over bus, type and class callbacks,
+-while type callbacks take precedence over bus and class callbacks, and class
+-callbacks take precedence over bus callbacks.  To be precise, the following
+-rules are used to determine which callback to execute in the given phase:
++All phases use PM domain, bus, type, class or driver callbacks (that is, methods
++defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
++dev->driver->pm).  These callbacks are regarded by the PM core as mutually
++exclusive.  Moreover, PM domain callbacks always take precedence over all of the
++other callbacks and, for example, type callbacks take precedence over bus, class
++and driver callbacks.  To be precise, the following rules are used to determine
++which callback to execute in the given phase:
+-    1.        If dev->pm_domain is present, the PM core will attempt to execute the
+-      callback included in dev->pm_domain->ops.  If that callback is not
+-      present, no action will be carried out for the given device.
++    1.        If dev->pm_domain is present, the PM core will choose the callback
++      included in dev->pm_domain->ops for execution
+     2.        Otherwise, if both dev->type and dev->type->pm are present, the callback
+-      included in dev->type->pm will be executed.
++      included in dev->type->pm will be chosen for execution.
+     3.        Otherwise, if both dev->class and dev->class->pm are present, the
+-      callback included in dev->class->pm will be executed.
++      callback included in dev->class->pm will be chosen for execution.
+     4.        Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+-      included in dev->bus->pm will be executed.
++      included in dev->bus->pm will be chosen for execution.
+ This allows PM domains and device types to override callbacks provided by bus
+ types or device classes if necessary.
+-These callbacks may in turn invoke device- or driver-specific methods stored in
+-dev->driver->pm, but they don't have to.
++The PM domain, type, class and bus callbacks may in turn invoke device- or
++driver-specific methods stored in dev->driver->pm, but they don't have to do
++that.
++
++If the subsystem callback chosen for execution is not present, the PM core will
++execute the corresponding method from dev->driver->pm instead if there is one.
+ Entering System Suspend
+diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
+index c2ae8bf..4abe83e 100644
+--- a/Documentation/power/runtime_pm.txt
++++ b/Documentation/power/runtime_pm.txt
+@@ -57,6 +57,10 @@ the following:
+   4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
++If the subsystem chosen by applying the above rules doesn't provide the relevant
++callback, the PM core will invoke the corresponding driver callback stored in
++dev->driver->pm directly (if present).
++
+ The PM core always checks which callback to use in the order given above, so the
+ priority order of callbacks from high to low is: PM domain, device type, class
+ and bus type.  Moreover, the high-priority one will always take precedence over
+@@ -64,86 +68,88 @@ a low-priority one.  The PM domain, bus type, device type and class callbacks
+ are referred to as subsystem-level callbacks in what follows.
+ By default, the callbacks are always invoked in process context with interrupts
+-enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
+-to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
+-->runtime_idle() callbacks may be invoked in atomic context with interrupts
+-disabled for a given device.  This implies that the callback routines in
+-question must not block or sleep, but it also means that the synchronous helper
+-functions listed at the end of Section 4 may be used for that device within an
+-interrupt handler or generally in an atomic context.
+-
+-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
+-the suspend of the device as appropriate, which may, but need not include
+-executing the device driver's own ->runtime_suspend() callback (from the
++enabled.  However, the pm_runtime_irq_safe() helper function can be used to tell
++the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
++and ->runtime_idle() callbacks for the given device in atomic context with
++interrupts disabled.  This implies that the callback routines in question must
++not block or sleep, but it also means that the synchronous helper functions
++listed at the end of Section 4 may be used for that device within an interrupt
++handler or generally in an atomic context.
++
++The subsystem-level suspend callback, if present, is _entirely_ _responsible_
++for handling the suspend of the device as appropriate, which may, but need not
++include executing the device driver's own ->runtime_suspend() callback (from the
+ PM core's point of view it is not necessary to implement a ->runtime_suspend()
+ callback in a device driver as long as the subsystem-level suspend callback
+ knows what to do to handle the device).
+-  * Once the subsystem-level suspend callback has completed successfully
+-    for given device, the PM core regards the device as suspended, which need
+-    not mean that the device has been put into a low power state.  It is
+-    supposed to mean, however, that the device will not process data and will
+-    not communicate with the CPU(s) and RAM until the subsystem-level resume
+-    callback is executed for it.  The runtime PM status of a device after
+-    successful execution of the subsystem-level suspend callback is 'suspended'.
+-
+-  * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
+-    the device's runtime PM status is 'active', which means that the device
+-    _must_ be fully operational afterwards.
+-
+-  * If the subsystem-level suspend callback returns an error code different
+-    from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
+-    refuse to run the helper functions described in Section 4 for the device,
+-    until the status of it is directly set either to 'active', or to 'suspended'
+-    (the PM core provides special helper functions for this purpose).
+-
+-In particular, if the driver requires remote wake-up capability (i.e. hardware
++  * Once the subsystem-level suspend callback (or the driver suspend callback,
++    if invoked directly) has completed successfully for the given device, the PM
++    core regards the device as suspended, which need not mean that it has been
++    put into a low power state.  It is supposed to mean, however, that the
++    device will not process data and will not communicate with the CPU(s) and
++    RAM until the appropriate resume callback is executed for it.  The runtime
++    PM status of a device after successful execution of the suspend callback is
++    'suspended'.
++
++  * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
++    status remains 'active', which means that the device _must_ be fully
++    operational afterwards.
++
++  * If the suspend callback returns an error code different from -EBUSY and
++    -EAGAIN, the PM core regards this as a fatal error and will refuse to run
++    the helper functions described in Section 4 for the device until its status
++    is directly set to  either'active', or 'suspended' (the PM core provides
++    special helper functions for this purpose).
++
++In particular, if the driver requires remote wakeup capability (i.e. hardware
+ mechanism allowing the device to request a change of its power state, such as
+ PCI PME) for proper functioning and device_run_wake() returns 'false' for the
+ device, then ->runtime_suspend() should return -EBUSY.  On the other hand, if
+-device_run_wake() returns 'true' for the device and the device is put into a low
+-power state during the execution of the subsystem-level suspend callback, it is
+-expected that remote wake-up will be enabled for the device.  Generally, remote
+-wake-up should be enabled for all input devices put into a low power state at
+-run time.
+-
+-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
+-resume of the device as appropriate, which may, but need not include executing
+-the device driver's own ->runtime_resume() callback (from the PM core's point of
+-view it is not necessary to implement a ->runtime_resume() callback in a device
+-driver as long as the subsystem-level resume callback knows what to do to handle
+-the device).
+-
+-  * Once the subsystem-level resume callback has completed successfully, the PM
+-    core regards the device as fully operational, which means that the device
+-    _must_ be able to complete I/O operations as needed.  The runtime PM status
+-    of the device is then 'active'.
+-
+-  * If the subsystem-level resume callback returns an error code, the PM core
+-    regards this as a fatal error and will refuse to run the helper functions
+-    described in Section 4 for the device, until its status is directly set
+-    either to 'active' or to 'suspended' (the PM core provides special helper
+-    functions for this purpose).
+-
+-The subsystem-level idle callback is executed by the PM core whenever the device
+-appears to be idle, which is indicated to the PM core by two counters, the
+-device's usage counter and the counter of 'active' children of the device.
++device_run_wake() returns 'true' for the device and the device is put into a
++low-power state during the execution of the suspend callback, it is expected
++that remote wakeup will be enabled for the device.  Generally, remote wakeup
++should be enabled for all input devices put into low-power states at run time.
++
++The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
++handling the resume of the device as appropriate, which may, but need not
++include executing the device driver's own ->runtime_resume() callback (from the
++PM core's point of view it is not necessary to implement a ->runtime_resume()
++callback in a device driver as long as the subsystem-level resume callback knows
++what to do to handle the device).
++
++  * Once the subsystem-level resume callback (or the driver resume callback, if
++    invoked directly) has completed successfully, the PM core regards the device
++    as fully operational, which means that the device _must_ be able to complete
++    I/O operations as needed.  The runtime PM status of the device is then
++    'active'.
++
++  * If the resume callback returns an error code, the PM core regards this as a
++    fatal error and will refuse to run the helper functions described in Section
++    4 for the device, until its status is directly set to either 'active', or
++    'suspended' (by means of special helper functions provided by the PM core
++    for this purpose).
++
++The idle callback (a subsystem-level one, if present, or the driver one) is
++executed by the PM core whenever the device appears to be idle, which is
++indicated to the PM core by two counters, the device's usage counter and the
++counter of 'active' children of the device.
+   * If any of these counters is decreased using a helper function provided by
+     the PM core and it turns out to be equal to zero, the other counter is
+     checked.  If that counter also is equal to zero, the PM core executes the
+-    subsystem-level idle callback with the device as an argument.
++    idle callback with the device as its argument.
+-The action performed by a subsystem-level idle callback is totally dependent on
+-the subsystem in question, but the expected and recommended action is to check
++The action performed by the idle callback is totally dependent on the subsystem
++(or driver) in question, but the expected and recommended action is to check
+ if the device can be suspended (i.e. if all of the conditions necessary for
+ suspending the device are satisfied) and to queue up a suspend request for the
+ device in that case.  The value returned by this callback is ignored by the PM
+ core.
+ The helper functions provided by the PM core, described in Section 4, guarantee
+-that the following constraints are met with respect to the bus type's runtime
+-PM callbacks:
++that the following constraints are met with respect to runtime PM callbacks for
++one device:
+ (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
+     ->runtime_suspend() in parallel with ->runtime_resume() or with another
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index b5cef7e..e2cc3d2 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -383,10 +383,15 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
+               info = "EARLY class ";
+               callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+-              info = "EARLY ";
++              info = "EARLY bus ";
+               callback = pm_noirq_op(dev->bus->pm, state);
+       }
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "EARLY driver ";
++              callback = pm_noirq_op(dev->driver->pm, state);
++      }
++
+       error = dpm_run_callback(callback, dev, state, info);
+       TRACE_RESUME(error);
+@@ -464,20 +469,20 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+       if (dev->pm_domain) {
+               info = "power domain ";
+               callback = pm_op(&dev->pm_domain->ops, state);
+-              goto End;
++              goto Driver;
+       }
+       if (dev->type && dev->type->pm) {
+               info = "type ";
+               callback = pm_op(dev->type->pm, state);
+-              goto End;
++              goto Driver;
+       }
+       if (dev->class) {
+               if (dev->class->pm) {
+                       info = "class ";
+                       callback = pm_op(dev->class->pm, state);
+-                      goto End;
++                      goto Driver;
+               } else if (dev->class->resume) {
+                       info = "legacy class ";
+                       callback = dev->class->resume;
+@@ -487,14 +492,21 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+       if (dev->bus) {
+               if (dev->bus->pm) {
+-                      info = "";
++                      info = "bus ";
+                       callback = pm_op(dev->bus->pm, state);
+               } else if (dev->bus->resume) {
+-                      info = "legacy ";
++                      info = "legacy bus ";
+                       callback = dev->bus->resume;
++                      goto End;
+               }
+       }
++ Driver:
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "driver ";
++              callback = pm_op(dev->driver->pm, state);
++      }
++
+  End:
+       error = dpm_run_callback(callback, dev, state, info);
+       dev->power.is_suspended = false;
+@@ -588,24 +600,33 @@ void dpm_resume(pm_message_t state)
+  */
+ static void device_complete(struct device *dev, pm_message_t state)
+ {
++      void (*callback)(struct device *) = NULL;
++      char *info = NULL;
++
+       device_lock(dev);
+       if (dev->pm_domain) {
+-              pm_dev_dbg(dev, state, "completing power domain ");
+-              if (dev->pm_domain->ops.complete)
+-                      dev->pm_domain->ops.complete(dev);
++              info = "completing power domain ";
++              callback = dev->pm_domain->ops.complete;
+       } else if (dev->type && dev->type->pm) {
+-              pm_dev_dbg(dev, state, "completing type ");
+-              if (dev->type->pm->complete)
+-                      dev->type->pm->complete(dev);
++              info = "completing type ";
++              callback = dev->type->pm->complete;
+       } else if (dev->class && dev->class->pm) {
+-              pm_dev_dbg(dev, state, "completing class ");
+-              if (dev->class->pm->complete)
+-                      dev->class->pm->complete(dev);
++              info = "completing class ";
++              callback = dev->class->pm->complete;
+       } else if (dev->bus && dev->bus->pm) {
+-              pm_dev_dbg(dev, state, "completing ");
+-              if (dev->bus->pm->complete)
+-                      dev->bus->pm->complete(dev);
++              info = "completing bus ";
++              callback = dev->bus->pm->complete;
++      }
++
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "completing driver ";
++              callback = dev->driver->pm->complete;
++      }
++
++      if (callback) {
++              pm_dev_dbg(dev, state, info);
++              callback(dev);
+       }
+       device_unlock(dev);
+@@ -704,10 +725,15 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
+               info = "LATE class ";
+               callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+-              info = "LATE ";
++              info = "LATE bus ";
+               callback = pm_noirq_op(dev->bus->pm, state);
+       }
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "LATE driver ";
++              callback = pm_noirq_op(dev->driver->pm, state);
++      }
++
+       return dpm_run_callback(callback, dev, state, info);
+ }
+@@ -832,16 +858,21 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+       if (dev->bus) {
+               if (dev->bus->pm) {
+-                      info = "";
++                      info = "bus ";
+                       callback = pm_op(dev->bus->pm, state);
+               } else if (dev->bus->suspend) {
+-                      pm_dev_dbg(dev, state, "legacy ");
++                      pm_dev_dbg(dev, state, "legacy bus ");
+                       error = legacy_suspend(dev, state, dev->bus->suspend);
+                       goto End;
+               }
+       }
+  Run:
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "driver ";
++              callback = pm_op(dev->driver->pm, state);
++      }
++
+       error = dpm_run_callback(callback, dev, state, info);
+  End:
+@@ -949,6 +980,8 @@ int dpm_suspend(pm_message_t state)
+  */
+ static int device_prepare(struct device *dev, pm_message_t state)
+ {
++      int (*callback)(struct device *) = NULL;
++      char *info = NULL;
+       int error = 0;
+       device_lock(dev);
+@@ -956,25 +989,27 @@ static int device_prepare(struct device *dev, pm_message_t state)
+       dev->power.wakeup_path = device_may_wakeup(dev);
+       if (dev->pm_domain) {
+-              pm_dev_dbg(dev, state, "preparing power domain ");
+-              if (dev->pm_domain->ops.prepare)
+-                      error = dev->pm_domain->ops.prepare(dev);
+-              suspend_report_result(dev->pm_domain->ops.prepare, error);
++              info = "preparing power domain ";
++              callback = dev->pm_domain->ops.prepare;
+       } else if (dev->type && dev->type->pm) {
+-              pm_dev_dbg(dev, state, "preparing type ");
+-              if (dev->type->pm->prepare)
+-                      error = dev->type->pm->prepare(dev);
+-              suspend_report_result(dev->type->pm->prepare, error);
++              info = "preparing type ";
++              callback = dev->type->pm->prepare;
+       } else if (dev->class && dev->class->pm) {
+-              pm_dev_dbg(dev, state, "preparing class ");
+-              if (dev->class->pm->prepare)
+-                      error = dev->class->pm->prepare(dev);
+-              suspend_report_result(dev->class->pm->prepare, error);
++              info = "preparing class ";
++              callback = dev->class->pm->prepare;
+       } else if (dev->bus && dev->bus->pm) {
+-              pm_dev_dbg(dev, state, "preparing ");
+-              if (dev->bus->pm->prepare)
+-                      error = dev->bus->pm->prepare(dev);
+-              suspend_report_result(dev->bus->pm->prepare, error);
++              info = "preparing bus ";
++              callback = dev->bus->pm->prepare;
++      }
++
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "preparing driver ";
++              callback = dev->driver->pm->prepare;
++      }
++
++      if (callback) {
++              error = callback(dev);
++              suspend_report_result(callback, error);
+       }
+       device_unlock(dev);
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 068f7ed..541f821 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
+       else
+               callback = NULL;
++      if (!callback && dev->driver && dev->driver->pm)
++              callback = dev->driver->pm->runtime_idle;
++
+       if (callback)
+               __rpm_callback(callback, dev);
+@@ -485,6 +488,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       else
+               callback = NULL;
++      if (!callback && dev->driver && dev->driver->pm)
++              callback = dev->driver->pm->runtime_suspend;
++
+       retval = rpm_callback(callback, dev);
+       if (retval)
+               goto fail;
+@@ -713,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
+       else
+               callback = NULL;
++      if (!callback && dev->driver && dev->driver->pm)
++              callback = dev->driver->pm->runtime_resume;
++
+       retval = rpm_callback(callback, dev);
+       if (retval) {
+               __update_runtime_status(dev, RPM_SUSPENDED);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0161-PM-Drop-generic_subsys_pm_ops.patch b/patches.runtime_pm/0161-PM-Drop-generic_subsys_pm_ops.patch
new file mode 100644 (file)
index 0000000..77181ed
--- /dev/null
@@ -0,0 +1,100 @@
+From b6c13121853d97beae270e74ef8e151ab20c8ec9 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 18 Dec 2011 00:34:42 +0100
+Subject: PM: Drop generic_subsys_pm_ops
+
+Since the PM core is now going to execute driver callbacks directly
+if the corresponding subsystem callbacks are not present,
+forward-only subsystem callbacks (i.e. such that only execute the
+corresponding driver callbacks) are not necessary any more.  Thus
+it is possible to remove generic_subsys_pm_ops, because the only
+callback in there that is not forward-only, .runtime_idle, is not
+really used by the only user of generic_subsys_pm_ops, which is
+vio_bus_type.
+
+However, the generic callback routines themselves cannot be removed
+from generic_ops.c, because they are used individually by a number
+of subsystems.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 90363ddf0a1a4dccfbb8d0c10b8f488bc7fa69f8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ arch/powerpc/kernel/vio.c        |    1 -
+ drivers/base/power/generic_ops.c |   25 -------------------------
+ include/linux/pm.h               |   13 -------------
+ 3 files changed, 39 deletions(-)
+
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index 1b695fd..83387bf 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -1400,7 +1400,6 @@ static struct bus_type vio_bus_type = {
+       .match = vio_bus_match,
+       .probe = vio_bus_probe,
+       .remove = vio_bus_remove,
+-      .pm = GENERIC_SUBSYS_PM_OPS,
+ };
+ /**
+diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
+index 5a5b154..10bdd79 100644
+--- a/drivers/base/power/generic_ops.c
++++ b/drivers/base/power/generic_ops.c
+@@ -276,28 +276,3 @@ void pm_generic_complete(struct device *dev)
+       pm_runtime_idle(dev);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+-
+-struct dev_pm_ops generic_subsys_pm_ops = {
+-#ifdef CONFIG_PM_SLEEP
+-      .prepare = pm_generic_prepare,
+-      .suspend = pm_generic_suspend,
+-      .suspend_noirq = pm_generic_suspend_noirq,
+-      .resume = pm_generic_resume,
+-      .resume_noirq = pm_generic_resume_noirq,
+-      .freeze = pm_generic_freeze,
+-      .freeze_noirq = pm_generic_freeze_noirq,
+-      .thaw = pm_generic_thaw,
+-      .thaw_noirq = pm_generic_thaw_noirq,
+-      .poweroff = pm_generic_poweroff,
+-      .poweroff_noirq = pm_generic_poweroff_noirq,
+-      .restore = pm_generic_restore,
+-      .restore_noirq = pm_generic_restore_noirq,
+-      .complete = pm_generic_complete,
+-#endif
+-#ifdef CONFIG_PM_RUNTIME
+-      .runtime_suspend = pm_generic_runtime_suspend,
+-      .runtime_resume = pm_generic_runtime_resume,
+-      .runtime_idle = pm_generic_runtime_idle,
+-#endif
+-};
+-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index a7676ef..e4982ac 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
+       SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+-/*
+- * Use this for subsystems (bus types, device types, device classes) that don't
+- * need any special suspend/resume handling in addition to invoking the PM
+- * callbacks provided by device drivers supporting both the system sleep PM and
+- * runtime PM, make the pm member point to generic_subsys_pm_ops.
+- */
+-#ifdef CONFIG_PM
+-extern struct dev_pm_ops generic_subsys_pm_ops;
+-#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
+-#else
+-#define GENERIC_SUBSYS_PM_OPS NULL
+-#endif
+-
+ /**
+  * PM_EVENT_ messages
+  *
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0162-PM-QoS-Introduce-dev_pm_qos_add_ancestor_request.patch b/patches.runtime_pm/0162-PM-QoS-Introduce-dev_pm_qos_add_ancestor_request.patch
new file mode 100644 (file)
index 0000000..df9dbbe
--- /dev/null
@@ -0,0 +1,111 @@
+From 6d3f38b0892bddfe81c70350d3cc11108f3157f8 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 23 Dec 2011 01:23:52 +0100
+Subject: PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
+
+Some devices, like the I2C controller on SH7372, are not
+necessary for providing power to their children or forwarding
+wakeup signals (and generally interrupts) from them.  They are
+only needed by their children when there's some data to transfer,
+so they may be suspended for the majority of time and resumed
+on demand, when the children have data to send or receive.  For this
+purpose, however, their power.ignore_children flags have to be set,
+or the PM core wouldn't allow them to be suspended while their
+children were active.
+
+Unfortunately, in some situations it may take too much time to
+resume such devices so that they can assist their children in
+transferring data.  For example, if such a device belongs to a PM
+domain which goes to the "power off" state when that device is
+suspended, it may take too much time to restore power to the
+domain in response to the request from one of the device's
+children.  In that case, if the parent's resume time is critical,
+the domain should stay in the "power on" state, although it still may
+be desirable to power manage the parent itself (e.g. by manipulating
+its clock).
+
+In general, device PM QoS may be used to address this problem.
+Namely, if the device's children added PM QoS latency constraints
+for it, they would be able to prevent it from being put into an
+overly deep low-power state.  However, in some cases the devices
+needing to be serviced are not the immediate children of a
+"children-ignoring" device, but its grandchildren or even less
+direct descendants.  In those cases, the entity wanting to add a
+PM QoS request for a given device's ancestor that ignores its
+children will have to find it in the first place, so introduce a new
+helper function that may be used to achieve that.  This function,
+dev_pm_qos_add_ancestor_request(), will search for the first
+ancestor of the given device whose power.ignore_children flag is
+set and will add a device PM QoS latency request for that ancestor
+on behalf of the caller.  The request added this way may be removed
+with the help of dev_pm_qos_remove_request() in the future, like
+any other device PM QoS latency request.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 40a5f8be2f482783de0f1f0fe856660e489734a8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/qos.c |   25 +++++++++++++++++++++++++
+ include/linux/pm_qos.h   |    5 +++++
+ 2 files changed, 30 insertions(+)
+
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index 03f4bd0..c5d3588 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -420,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
+       return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
++
++/**
++ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
++ * @dev: Device whose ancestor to add the request for.
++ * @req: Pointer to the preallocated handle.
++ * @value: Constraint latency value.
++ */
++int dev_pm_qos_add_ancestor_request(struct device *dev,
++                                  struct dev_pm_qos_request *req, s32 value)
++{
++      struct device *ancestor = dev->parent;
++      int error = -ENODEV;
++
++      while (ancestor && !ancestor->power.ignore_children)
++              ancestor = ancestor->parent;
++
++      if (ancestor)
++              error = dev_pm_qos_add_request(ancestor, req, value);
++
++      if (error)
++              req->dev = NULL;
++
++      return error;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 775a323..e5bbcba 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -92,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
+ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
+ void dev_pm_qos_constraints_init(struct device *dev);
+ void dev_pm_qos_constraints_destroy(struct device *dev);
++int dev_pm_qos_add_ancestor_request(struct device *dev,
++                                  struct dev_pm_qos_request *req, s32 value);
+ #else
+ static inline int pm_qos_update_target(struct pm_qos_constraints *c,
+                                      struct plist_node *node,
+@@ -153,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+ {
+       dev->power.power_state = PMSG_INVALID;
+ }
++static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
++                                  struct dev_pm_qos_request *req, s32 value)
++                      { return 0; }
+ #endif
+ #endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0163-power_supply-Add-initial-Charger-Manager-driver.patch b/patches.runtime_pm/0163-power_supply-Add-initial-Charger-Manager-driver.patch
new file mode 100644 (file)
index 0000000..8b7114b
--- /dev/null
@@ -0,0 +1,1143 @@
+From 7ab1eef699974deb1db6729dd425bffb063ff007 Mon Sep 17 00:00:00 2001
+From: Donggeun Kim <dg77.kim@samsung.com>
+Date: Tue, 27 Dec 2011 18:47:48 +0900
+Subject: power_supply: Add initial Charger-Manager driver
+
+Because battery health monitoring should be done even when suspended,
+it needs to wake up and suspend periodically. Thus, userspace battery
+monitoring may incur too much overhead; every device and task is woken
+up periodically. Charger Manager uses suspend-again to provide
+in-suspend monitoring.
+
+This patch allows to monitor battery health in-suspend state.
+
+Signed-off-by: Donggeun Kim <dg77.kim@samsung.com>
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Signed-off-by: Anton Vorontsov <cbouatmailru@gmail.com>
+(cherry picked from commit 3bb3dbbd56ea39e5537db8f8041ea95d28f16a7f)
+
+Conflicts:
+
+       drivers/power/Kconfig
+       drivers/power/Makefile
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/charger-manager.txt |  149 ++++++
+ drivers/power/Kconfig                   |   10 +
+ drivers/power/Makefile                  |    1 +
+ drivers/power/charger-manager.c         |  779 +++++++++++++++++++++++++++++++
+ include/linux/power/charger-manager.h   |  130 ++++++
+ 5 files changed, 1069 insertions(+)
+ create mode 100644 Documentation/power/charger-manager.txt
+ create mode 100644 drivers/power/charger-manager.c
+ create mode 100644 include/linux/power/charger-manager.h
+
+diff --git a/Documentation/power/charger-manager.txt b/Documentation/power/charger-manager.txt
+new file mode 100644
+index 0000000..081489f
+--- /dev/null
++++ b/Documentation/power/charger-manager.txt
+@@ -0,0 +1,149 @@
++Charger Manager
++      (C) 2011 MyungJoo Ham <myungjoo.ham@samsung.com>, GPL
++
++Charger Manager provides in-kernel battery charger management that
++requires temperature monitoring during suspend-to-RAM state
++and where each battery may have multiple chargers attached and the userland
++wants to look at the aggregated information of the multiple chargers.
++
++Charger Manager is a platform_driver with power-supply-class entries.
++An instance of Charger Manager (a platform-device created with Charger-Manager)
++represents an independent battery with chargers. If there are multiple
++batteries with their own chargers acting independently in a system,
++the system may need multiple instances of Charger Manager.
++
++1. Introduction
++===============
++
++Charger Manager supports the following:
++
++* Support for multiple chargers (e.g., a device with USB, AC, and solar panels)
++      A system may have multiple chargers (or power sources) and some of
++      they may be activated at the same time. Each charger may have its
++      own power-supply-class and each power-supply-class can provide
++      different information about the battery status. This framework
++      aggregates charger-related information from multiple sources and
++      shows combined information as a single power-supply-class.
++
++* Support for in suspend-to-RAM polling (with suspend_again callback)
++      While the battery is being charged and the system is in suspend-to-RAM,
++      we may need to monitor the battery health by looking at the ambient or
++      battery temperature. We can accomplish this by waking up the system
++      periodically. However, such a method wakes up devices unncessary for
++      monitoring the battery health and tasks, and user processes that are
++      supposed to be kept suspended. That, in turn, incurs unnecessary power
++      consumption and slow down charging process. Or even, such peak power
++      consumption can stop chargers in the middle of charging
++      (external power input < device power consumption), which not
++      only affects the charging time, but the lifespan of the battery.
++
++      Charger Manager provides a function "cm_suspend_again" that can be
++      used as suspend_again callback of platform_suspend_ops. If the platform
++      requires tasks other than cm_suspend_again, it may implement its own
++      suspend_again callback that calls cm_suspend_again in the middle.
++      Normally, the platform will need to resume and suspend some devices
++      that are used by Charger Manager.
++
++2. Global Charger-Manager Data related with suspend_again
++========================================================
++In order to setup Charger Manager with suspend-again feature
++(in-suspend monitoring), the user should provide charger_global_desc
++with setup_charger_manager(struct charger_global_desc *).
++This charger_global_desc data for in-suspend monitoring is global
++as the name suggests. Thus, the user needs to provide only once even
++if there are multiple batteries. If there are multiple batteries, the
++multiple instances of Charger Manager share the same charger_global_desc
++and it will manage in-suspend monitoring for all instances of Charger Manager.
++
++The user needs to provide all the two entries properly in order to activate
++in-suspend monitoring:
++
++struct charger_global_desc {
++
++char *rtc_name;
++      : The name of rtc (e.g., "rtc0") used to wakeup the system from
++      suspend for Charger Manager. The alarm interrupt (AIE) of the rtc
++      should be able to wake up the system from suspend. Charger Manager
++      saves and restores the alarm value and use the previously-defined
++      alarm if it is going to go off earlier than Charger Manager so that
++      Charger Manager does not interfere with previously-defined alarms.
++
++bool (*rtc_only_wakeup)(void);
++      : This callback should let CM know whether
++      the wakeup-from-suspend is caused only by the alarm of "rtc" in the
++      same struct. If there is any other wakeup source triggered the
++      wakeup, it should return false. If the "rtc" is the only wakeup
++      reason, it should return true.
++};
++
++3. How to setup suspend_again
++=============================
++Charger Manager provides a function "extern bool cm_suspend_again(void)".
++When cm_suspend_again is called, it monitors every battery. The suspend_ops
++callback of the system's platform_suspend_ops can call cm_suspend_again
++function to know whether Charger Manager wants to suspend again or not.
++If there are no other devices or tasks that want to use suspend_again
++feature, the platform_suspend_ops may directly refer to cm_suspend_again
++for its suspend_again callback.
++
++The cm_suspend_again() returns true (meaning "I want to suspend again")
++if the system was woken up by Charger Manager and the polling
++(in-suspend monitoring) results in "normal".
++
++4. Charger-Manager Data (struct charger_desc)
++=============================================
++For each battery charged independently from other batteries (if a series of
++batteries are charged by a single charger, they are counted as one independent
++battery), an instance of Charger Manager is attached to it.
++
++struct charger_desc {
++
++enum polling_modes polling_mode;
++      : CM_POLL_DISABLE: do not poll this battery.
++        CM_POLL_ALWAYS: always poll this battery.
++        CM_POLL_EXTERNAL_POWER_ONLY: poll this battery if and only if
++                                     an external power source is attached.
++        CM_POLL_CHARGING_ONLY: poll this battery if and only if the
++                               battery is being charged.
++
++unsigned int polling_interval_ms;
++      : Required polling interval in ms. Charger Manager will poll
++      this battery every polling_interval_ms or more frequently.
++
++enum data_source battery_present;
++      CM_FUEL_GAUGE: get battery presence information from fuel gauge.
++      CM_CHARGER_STAT: get battery presence from chargers.
++
++char **psy_charger_stat;
++      : An array ending with NULL that has power-supply-class names of
++      chargers. Each power-supply-class should provide "PRESENT" (if
++      battery_present is "CM_CHARGER_STAT"), "ONLINE" (shows whether an
++      external power source is attached or not), and "STATUS" (shows whether
++      the battery is {"FULL" or not FULL} or {"FULL", "Charging",
++      "Discharging", "NotCharging"}).
++
++int num_charger_regulators;
++struct regulator_bulk_data *charger_regulators;
++      : Regulators representing the chargers in the form for
++      regulator framework's bulk functions.
++
++char *psy_fuel_gauge;
++      : Power-supply-class name of the fuel gauge.
++
++int (*temperature_out_of_range)(int *mC);
++      : This callback returns 0 if the temperature is safe for charging,
++      a positive number if it is too hot to charge, and a negative number
++      if it is too cold to charge. With the variable mC, the callback returns
++      the temperature in 1/1000 of centigrade.
++};
++
++5. Other Considerations
++=======================
++
++At the charger/battery-related events such as battery-pulled-out,
++charger-pulled-out, charger-inserted, DCIN-over/under-voltage, charger-stopped,
++and others critical to chargers, the system should be configured to wake up.
++At least the following should wake up the system from a suspend:
++a) charger-on/off b) external-power-in/out c) battery-in/out (while charging)
++
++It is usually accomplished by configuring the PMIC as a wakeup source.
+diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
+index e57b50b..8266193 100644
+--- a/drivers/power/Kconfig
++++ b/drivers/power/Kconfig
+@@ -235,4 +235,14 @@ config CHARGER_GPIO
+         This driver can be build as a module. If so, the module will be
+         called gpio-charger.
++config CHARGER_MANAGER
++      bool "Battery charger manager for multiple chargers"
++      depends on REGULATOR && RTC_CLASS
++      help
++          Say Y to enable charger-manager support, which allows multiple
++          chargers attached to a battery and multiple batteries attached to a
++          system. The charger-manager also can monitor charging status in
++          runtime and in suspend-to-RAM by waking up the system periodically
++          with help of suspend_again support.
++
+ endif # POWER_SUPPLY
+diff --git a/drivers/power/Makefile b/drivers/power/Makefile
+index 009a90f..ca69761 100644
+--- a/drivers/power/Makefile
++++ b/drivers/power/Makefile
+@@ -36,3 +36,4 @@ obj-$(CONFIG_CHARGER_ISP1704)        += isp1704_charger.o
+ obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
+ obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
+ obj-$(CONFIG_CHARGER_GPIO)    += gpio-charger.o
++obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
+diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
+new file mode 100644
+index 0000000..727a259
+--- /dev/null
++++ b/drivers/power/charger-manager.c
+@@ -0,0 +1,779 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
++ * MyungJoo Ham <myungjoo.ham@samsung.com>
++ *
++ * This driver enables to monitor battery health and control charger
++ * during suspend-to-mem.
++ * Charger manager depends on other devices. register this later than
++ * the depending devices.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++**/
++
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/rtc.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++#include <linux/platform_device.h>
++#include <linux/power/charger-manager.h>
++#include <linux/regulator/consumer.h>
++
++/*
++ * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
++ * delayed works so that we can run delayed works with CM_JIFFIES_SMALL
++ * without any delays.
++ */
++#define       CM_JIFFIES_SMALL        (2)
++
++/* If y is valid (> 0) and smaller than x, do x = y */
++#define CM_MIN_VALID(x, y)    x = (((y > 0) && ((x) > (y))) ? (y) : (x))
++
++/*
++ * Regard CM_RTC_SMALL (sec) is small enough to ignore error in invoking
++ * rtc alarm. It should be 2 or larger
++ */
++#define CM_RTC_SMALL          (2)
++
++#define UEVENT_BUF_SIZE               32
++
++static LIST_HEAD(cm_list);
++static DEFINE_MUTEX(cm_list_mtx);
++
++/* About in-suspend (suspend-again) monitoring */
++static struct rtc_device *rtc_dev;
++/*
++ * Backup RTC alarm
++ * Save the wakeup alarm before entering suspend-to-RAM
++ */
++static struct rtc_wkalrm rtc_wkalarm_save;
++/* Backup RTC alarm time in terms of seconds since 01-01-1970 00:00:00 */
++static unsigned long rtc_wkalarm_save_time;
++static bool cm_suspended;
++static bool cm_rtc_set;
++static unsigned long cm_suspend_duration_ms;
++
++/* Global charger-manager description */
++static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
++
++/**
++ * is_batt_present - See if the battery presents in place.
++ * @cm: the Charger Manager representing the battery.
++ */
++static bool is_batt_present(struct charger_manager *cm)
++{
++      union power_supply_propval val;
++      bool present = false;
++      int i, ret;
++
++      switch (cm->desc->battery_present) {
++      case CM_FUEL_GAUGE:
++              ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
++                              POWER_SUPPLY_PROP_PRESENT, &val);
++              if (ret == 0 && val.intval)
++                      present = true;
++              break;
++      case CM_CHARGER_STAT:
++              for (i = 0; cm->charger_stat[i]; i++) {
++                      ret = cm->charger_stat[i]->get_property(
++                                      cm->charger_stat[i],
++                                      POWER_SUPPLY_PROP_PRESENT, &val);
++                      if (ret == 0 && val.intval) {
++                              present = true;
++                              break;
++                      }
++              }
++              break;
++      }
++
++      return present;
++}
++
++/**
++ * is_ext_pwr_online - See if an external power source is attached to charge
++ * @cm: the Charger Manager representing the battery.
++ *
++ * Returns true if at least one of the chargers of the battery has an external
++ * power source attached to charge the battery regardless of whether it is
++ * actually charging or not.
++ */
++static bool is_ext_pwr_online(struct charger_manager *cm)
++{
++      union power_supply_propval val;
++      bool online = false;
++      int i, ret;
++
++      /* If at least one of them has one, it's yes. */
++      for (i = 0; cm->charger_stat[i]; i++) {
++              ret = cm->charger_stat[i]->get_property(
++                              cm->charger_stat[i],
++                              POWER_SUPPLY_PROP_ONLINE, &val);
++              if (ret == 0 && val.intval) {
++                      online = true;
++                      break;
++              }
++      }
++
++      return online;
++}
++
++/**
++ * is_charging - Returns true if the battery is being charged.
++ * @cm: the Charger Manager representing the battery.
++ */
++static bool is_charging(struct charger_manager *cm)
++{
++      int i, ret;
++      bool charging = false;
++      union power_supply_propval val;
++
++      /* If there is no battery, it cannot be charged */
++      if (!is_batt_present(cm))
++              return false;
++
++      /* If at least one of the charger is charging, return yes */
++      for (i = 0; cm->charger_stat[i]; i++) {
++              /* 1. The charger sholuld not be DISABLED */
++              if (cm->emergency_stop)
++                      continue;
++              if (!cm->charger_enabled)
++                      continue;
++
++              /* 2. The charger should be online (ext-power) */
++              ret = cm->charger_stat[i]->get_property(
++                              cm->charger_stat[i],
++                              POWER_SUPPLY_PROP_ONLINE, &val);
++              if (ret) {
++                      dev_warn(cm->dev, "Cannot read ONLINE value from %s.\n",
++                                      cm->desc->psy_charger_stat[i]);
++                      continue;
++              }
++              if (val.intval == 0)
++                      continue;
++
++              /*
++               * 3. The charger should not be FULL, DISCHARGING,
++               * or NOT_CHARGING.
++               */
++              ret = cm->charger_stat[i]->get_property(
++                              cm->charger_stat[i],
++                              POWER_SUPPLY_PROP_STATUS, &val);
++              if (ret) {
++                      dev_warn(cm->dev, "Cannot read STATUS value from %s.\n",
++                                      cm->desc->psy_charger_stat[i]);
++                      continue;
++              }
++              if (val.intval == POWER_SUPPLY_STATUS_FULL ||
++                              val.intval == POWER_SUPPLY_STATUS_DISCHARGING ||
++                              val.intval == POWER_SUPPLY_STATUS_NOT_CHARGING)
++                      continue;
++
++              /* Then, this is charging. */
++              charging = true;
++              break;
++      }
++
++      return charging;
++}
++
++/**
++ * is_polling_required - Return true if need to continue polling for this CM.
++ * @cm: the Charger Manager representing the battery.
++ */
++static bool is_polling_required(struct charger_manager *cm)
++{
++      switch (cm->desc->polling_mode) {
++      case CM_POLL_DISABLE:
++              return false;
++      case CM_POLL_ALWAYS:
++              return true;
++      case CM_POLL_EXTERNAL_POWER_ONLY:
++              return is_ext_pwr_online(cm);
++      case CM_POLL_CHARGING_ONLY:
++              return is_charging(cm);
++      default:
++              dev_warn(cm->dev, "Incorrect polling_mode (%d)\n",
++                      cm->desc->polling_mode);
++      }
++
++      return false;
++}
++
++/**
++ * try_charger_enable - Enable/Disable chargers altogether
++ * @cm: the Charger Manager representing the battery.
++ * @enable: true: enable / false: disable
++ *
++ * Note that Charger Manager keeps the charger enabled regardless whether
++ * the charger is charging or not (because battery is full or no external
++ * power source exists) except when CM needs to disable chargers forcibly
++ * bacause of emergency causes; when the battery is overheated or too cold.
++ */
++static int try_charger_enable(struct charger_manager *cm, bool enable)
++{
++      int err = 0, i;
++      struct charger_desc *desc = cm->desc;
++
++      /* Ignore if it's redundent command */
++      if (enable && cm->charger_enabled)
++              return 0;
++      if (!enable && !cm->charger_enabled)
++              return 0;
++
++      if (enable) {
++              if (cm->emergency_stop)
++                      return -EAGAIN;
++              err = regulator_bulk_enable(desc->num_charger_regulators,
++                                      desc->charger_regulators);
++      } else {
++              /*
++               * Abnormal battery state - Stop charging forcibly,
++               * even if charger was enabled at the other places
++               */
++              err = regulator_bulk_disable(desc->num_charger_regulators,
++                                      desc->charger_regulators);
++
++              for (i = 0; i < desc->num_charger_regulators; i++) {
++                      if (regulator_is_enabled(
++                                  desc->charger_regulators[i].consumer)) {
++                              regulator_force_disable(
++                                      desc->charger_regulators[i].consumer);
++                              dev_warn(cm->dev,
++                                      "Disable regulator(%s) forcibly.\n",
++                                      desc->charger_regulators[i].supply);
++                      }
++              }
++      }
++
++      if (!err)
++              cm->charger_enabled = enable;
++
++      return err;
++}
++
++/**
++ * uevent_notify - Let users know something has changed.
++ * @cm: the Charger Manager representing the battery.
++ * @event: the event string.
++ *
++ * If @event is null, it implies that uevent_notify is called
++ * by resume function. When called in the resume function, cm_suspended
++ * should be already reset to false in order to let uevent_notify
++ * notify the recent event during the suspend to users. While
++ * suspended, uevent_notify does not notify users, but tracks
++ * events so that uevent_notify can notify users later after resumed.
++ */
++static void uevent_notify(struct charger_manager *cm, const char *event)
++{
++      static char env_str[UEVENT_BUF_SIZE + 1] = "";
++      static char env_str_save[UEVENT_BUF_SIZE + 1] = "";
++
++      if (cm_suspended) {
++              /* Nothing in suspended-event buffer */
++              if (env_str_save[0] == 0) {
++                      if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
++                              return; /* status not changed */
++                      strncpy(env_str_save, event, UEVENT_BUF_SIZE);
++                      return;
++              }
++
++              if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
++                      return; /* Duplicated. */
++              else
++                      strncpy(env_str_save, event, UEVENT_BUF_SIZE);
++
++              return;
++      }
++
++      if (event == NULL) {
++              /* No messages pending */
++              if (!env_str_save[0])
++                      return;
++
++              strncpy(env_str, env_str_save, UEVENT_BUF_SIZE);
++              kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
++              env_str_save[0] = 0;
++
++              return;
++      }
++
++      /* status not changed */
++      if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
++              return;
++
++      /* save the status and notify the update */
++      strncpy(env_str, event, UEVENT_BUF_SIZE);
++      kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
++
++      dev_info(cm->dev, event);
++}
++
++/**
++ * _cm_monitor - Monitor the temperature and return true for exceptions.
++ * @cm: the Charger Manager representing the battery.
++ *
++ * Returns true if there is an event to notify for the battery.
++ * (True if the status of "emergency_stop" changes)
++ */
++static bool _cm_monitor(struct charger_manager *cm)
++{
++      struct charger_desc *desc = cm->desc;
++      int temp = desc->temperature_out_of_range(&cm->last_temp_mC);
++
++      dev_dbg(cm->dev, "monitoring (%2.2d.%3.3dC)\n",
++              cm->last_temp_mC / 1000, cm->last_temp_mC % 1000);
++
++      /* It has been stopped or charging already */
++      if (!!temp == !!cm->emergency_stop)
++              return false;
++
++      if (temp) {
++              cm->emergency_stop = temp;
++              if (!try_charger_enable(cm, false)) {
++                      if (temp > 0)
++                              uevent_notify(cm, "OVERHEAT");
++                      else
++                              uevent_notify(cm, "COLD");
++              }
++      } else {
++              cm->emergency_stop = 0;
++              if (!try_charger_enable(cm, true))
++                      uevent_notify(cm, "CHARGING");
++      }
++
++      return true;
++}
++
++/**
++ * cm_monitor - Monitor every battery.
++ *
++ * Returns true if there is an event to notify from any of the batteries.
++ * (True if the status of "emergency_stop" changes)
++ */
++static bool cm_monitor(void)
++{
++      bool stop = false;
++      struct charger_manager *cm;
++
++      mutex_lock(&cm_list_mtx);
++
++      list_for_each_entry(cm, &cm_list, entry)
++              stop = stop || _cm_monitor(cm);
++
++      mutex_unlock(&cm_list_mtx);
++
++      return stop;
++}
++
++/**
++ * cm_setup_timer - For in-suspend monitoring setup wakeup alarm
++ *                for suspend_again.
++ *
++ * Returns true if the alarm is set for Charger Manager to use.
++ * Returns false if
++ *    cm_setup_timer fails to set an alarm,
++ *    cm_setup_timer does not need to set an alarm for Charger Manager,
++ *    or an alarm previously configured is to be used.
++ */
++static bool cm_setup_timer(void)
++{
++      struct charger_manager *cm;
++      unsigned int wakeup_ms = UINT_MAX;
++      bool ret = false;
++
++      mutex_lock(&cm_list_mtx);
++
++      list_for_each_entry(cm, &cm_list, entry) {
++              /* Skip if polling is not required for this CM */
++              if (!is_polling_required(cm) && !cm->emergency_stop)
++                      continue;
++              if (cm->desc->polling_interval_ms == 0)
++                      continue;
++              CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms);
++      }
++
++      mutex_unlock(&cm_list_mtx);
++
++      if (wakeup_ms < UINT_MAX && wakeup_ms > 0) {
++              pr_info("Charger Manager wakeup timer: %u ms.\n", wakeup_ms);
++              if (rtc_dev) {
++                      struct rtc_wkalrm tmp;
++                      unsigned long time, now;
++                      unsigned long add = DIV_ROUND_UP(wakeup_ms, 1000);
++
++                      /*
++                       * Set alarm with the polling interval (wakeup_ms)
++                       * except when rtc_wkalarm_save comes first.
++                       * However, the alarm time should be NOW +
++                       * CM_RTC_SMALL or later.
++                       */
++                      tmp.enabled = 1;
++                      rtc_read_time(rtc_dev, &tmp.time);
++                      rtc_tm_to_time(&tmp.time, &now);
++                      if (add < CM_RTC_SMALL)
++                              add = CM_RTC_SMALL;
++                      time = now + add;
++
++                      ret = true;
++
++                      if (rtc_wkalarm_save.enabled &&
++                          rtc_wkalarm_save_time &&
++                          rtc_wkalarm_save_time < time) {
++                              if (rtc_wkalarm_save_time < now + CM_RTC_SMALL)
++                                      time = now + CM_RTC_SMALL;
++                              else
++                                      time = rtc_wkalarm_save_time;
++
++                              /* The timer is not appointed by CM */
++                              ret = false;
++                      }
++
++                      pr_info("Waking up after %lu secs.\n",
++                                      time - now);
++
++                      rtc_time_to_tm(time, &tmp.time);
++                      rtc_set_alarm(rtc_dev, &tmp);
++                      cm_suspend_duration_ms += wakeup_ms;
++                      return ret;
++              }
++      }
++
++      if (rtc_dev)
++              rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
++      return false;
++}
++
++/**
++ * cm_suspend_again - Determine whether suspend again or not
++ *
++ * Returns true if the system should be suspended again
++ * Returns false if the system should be woken up
++ */
++bool cm_suspend_again(void)
++{
++      struct charger_manager *cm;
++      bool ret = false;
++
++      if (!g_desc || !g_desc->rtc_only_wakeup || !g_desc->rtc_only_wakeup() ||
++          !cm_rtc_set)
++              return false;
++
++      if (cm_monitor())
++              goto out;
++
++      ret = true;
++      mutex_lock(&cm_list_mtx);
++      list_for_each_entry(cm, &cm_list, entry) {
++              if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
++                  cm->status_save_batt != is_batt_present(cm))
++                      ret = false;
++      }
++      mutex_unlock(&cm_list_mtx);
++
++      cm_rtc_set = cm_setup_timer();
++out:
++      /* It's about the time when the non-CM appointed timer goes off */
++      if (rtc_wkalarm_save.enabled) {
++              unsigned long now;
++              struct rtc_time tmp;
++
++              rtc_read_time(rtc_dev, &tmp);
++              rtc_tm_to_time(&tmp, &now);
++
++              if (rtc_wkalarm_save_time &&
++                  now + CM_RTC_SMALL >= rtc_wkalarm_save_time)
++                      return false;
++      }
++      return ret;
++}
++EXPORT_SYMBOL_GPL(cm_suspend_again);
++
++/**
++ * setup_charger_manager - initialize charger_global_desc data
++ * @gd: pointer to instance of charger_global_desc
++ */
++int setup_charger_manager(struct charger_global_desc *gd)
++{
++      if (!gd)
++              return -EINVAL;
++
++      if (rtc_dev)
++              rtc_class_close(rtc_dev);
++      rtc_dev = NULL;
++      g_desc = NULL;
++
++      if (!gd->rtc_only_wakeup) {
++              pr_err("The callback rtc_only_wakeup is not given.\n");
++              return -EINVAL;
++      }
++
++      if (gd->rtc_name) {
++              rtc_dev = rtc_class_open(gd->rtc_name);
++              if (IS_ERR_OR_NULL(rtc_dev)) {
++                      rtc_dev = NULL;
++                      /* Retry at probe. RTC may be not registered yet */
++              }
++      } else {
++              pr_warn("No wakeup timer is given for charger manager."
++                      "In-suspend monitoring won't work.\n");
++      }
++
++      g_desc = gd;
++      return 0;
++}
++EXPORT_SYMBOL_GPL(setup_charger_manager);
++
++static int charger_manager_probe(struct platform_device *pdev)
++{
++      struct charger_desc *desc = dev_get_platdata(&pdev->dev);
++      struct charger_manager *cm;
++      int ret = 0, i = 0;
++
++      if (g_desc && !rtc_dev && g_desc->rtc_name) {
++              rtc_dev = rtc_class_open(g_desc->rtc_name);
++              if (IS_ERR_OR_NULL(rtc_dev)) {
++                      rtc_dev = NULL;
++                      dev_err(&pdev->dev, "Cannot get RTC %s.\n",
++                              g_desc->rtc_name);
++                      ret = -ENODEV;
++                      goto err_alloc;
++              }
++      }
++
++      if (!desc) {
++              dev_err(&pdev->dev, "No platform data (desc) found.\n");
++              ret = -ENODEV;
++              goto err_alloc;
++      }
++
++      cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL);
++      if (!cm) {
++              dev_err(&pdev->dev, "Cannot allocate memory.\n");
++              ret = -ENOMEM;
++              goto err_alloc;
++      }
++
++      /* Basic Values. Unspecified are Null or 0 */
++      cm->dev = &pdev->dev;
++      cm->desc = kzalloc(sizeof(struct charger_desc), GFP_KERNEL);
++      if (!cm->desc) {
++              dev_err(&pdev->dev, "Cannot allocate memory.\n");
++              ret = -ENOMEM;
++              goto err_alloc_desc;
++      }
++      memcpy(cm->desc, desc, sizeof(struct charger_desc));
++      cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
++
++      if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
++              ret = -EINVAL;
++              dev_err(&pdev->dev, "charger_regulators undefined.\n");
++              goto err_no_charger;
++      }
++
++      if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) {
++              dev_err(&pdev->dev, "No power supply defined.\n");
++              ret = -EINVAL;
++              goto err_no_charger_stat;
++      }
++
++      /* Counting index only */
++      while (desc->psy_charger_stat[i])
++              i++;
++
++      cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1),
++                                 GFP_KERNEL);
++      if (!cm->charger_stat) {
++              ret = -ENOMEM;
++              goto err_no_charger_stat;
++      }
++
++      for (i = 0; desc->psy_charger_stat[i]; i++) {
++              cm->charger_stat[i] = power_supply_get_by_name(
++                                      desc->psy_charger_stat[i]);
++              if (!cm->charger_stat[i]) {
++                      dev_err(&pdev->dev, "Cannot find power supply "
++                                      "\"%s\"\n",
++                                      desc->psy_charger_stat[i]);
++                      ret = -ENODEV;
++                      goto err_chg_stat;
++              }
++      }
++
++      cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
++      if (!cm->fuel_gauge) {
++              dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
++                              desc->psy_fuel_gauge);
++              ret = -ENODEV;
++              goto err_chg_stat;
++      }
++
++      if (desc->polling_interval_ms == 0 ||
++          msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) {
++              dev_err(&pdev->dev, "polling_interval_ms is too small\n");
++              ret = -EINVAL;
++              goto err_chg_stat;
++      }
++
++      if (!desc->temperature_out_of_range) {
++              dev_err(&pdev->dev, "there is no temperature_out_of_range\n");
++              ret = -EINVAL;
++              goto err_chg_stat;
++      }
++
++      platform_set_drvdata(pdev, cm);
++
++      ret = regulator_bulk_get(&pdev->dev, desc->num_charger_regulators,
++                               desc->charger_regulators);
++      if (ret) {
++              dev_err(&pdev->dev, "Cannot get charger regulators.\n");
++              goto err_chg_stat;
++      }
++
++      ret = try_charger_enable(cm, true);
++      if (ret) {
++              dev_err(&pdev->dev, "Cannot enable charger regulators\n");
++              goto err_chg_enable;
++      }
++
++      /* Add to the list */
++      mutex_lock(&cm_list_mtx);
++      list_add(&cm->entry, &cm_list);
++      mutex_unlock(&cm_list_mtx);
++
++      return 0;
++
++err_chg_enable:
++      if (desc->charger_regulators)
++              regulator_bulk_free(desc->num_charger_regulators,
++                                      desc->charger_regulators);
++err_chg_stat:
++      kfree(cm->charger_stat);
++err_no_charger_stat:
++err_no_charger:
++      kfree(cm->desc);
++err_alloc_desc:
++      kfree(cm);
++err_alloc:
++      return ret;
++}
++
++static int __devexit charger_manager_remove(struct platform_device *pdev)
++{
++      struct charger_manager *cm = platform_get_drvdata(pdev);
++      struct charger_desc *desc = cm->desc;
++
++      /* Remove from the list */
++      mutex_lock(&cm_list_mtx);
++      list_del(&cm->entry);
++      mutex_unlock(&cm_list_mtx);
++
++      if (desc->charger_regulators)
++              regulator_bulk_free(desc->num_charger_regulators,
++                                      desc->charger_regulators);
++      kfree(cm->charger_stat);
++      kfree(cm->desc);
++      kfree(cm);
++
++      return 0;
++}
++
++const struct platform_device_id charger_manager_id[] = {
++      { "charger-manager", 0 },
++      { },
++};
++
++static int cm_suspend_prepare(struct device *dev)
++{
++      struct platform_device *pdev = container_of(dev, struct platform_device,
++                                                  dev);
++      struct charger_manager *cm = platform_get_drvdata(pdev);
++
++      if (!cm_suspended) {
++              if (rtc_dev) {
++                      struct rtc_time tmp;
++                      unsigned long now;
++
++                      rtc_read_alarm(rtc_dev, &rtc_wkalarm_save);
++                      rtc_read_time(rtc_dev, &tmp);
++
++                      if (rtc_wkalarm_save.enabled) {
++                              rtc_tm_to_time(&rtc_wkalarm_save.time,
++                                             &rtc_wkalarm_save_time);
++                              rtc_tm_to_time(&tmp, &now);
++                              if (now > rtc_wkalarm_save_time)
++                                      rtc_wkalarm_save_time = 0;
++                      } else {
++                              rtc_wkalarm_save_time = 0;
++                      }
++              }
++              cm_suspended = true;
++      }
++
++      cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
++      cm->status_save_batt = is_batt_present(cm);
++
++      if (!cm_rtc_set) {
++              cm_suspend_duration_ms = 0;
++              cm_rtc_set = cm_setup_timer();
++      }
++
++      return 0;
++}
++
++static void cm_suspend_complete(struct device *dev)
++{
++      struct platform_device *pdev = container_of(dev, struct platform_device,
++                                                  dev);
++      struct charger_manager *cm = platform_get_drvdata(pdev);
++
++      if (cm_suspended) {
++              if (rtc_dev) {
++                      struct rtc_wkalrm tmp;
++
++                      rtc_read_alarm(rtc_dev, &tmp);
++                      rtc_wkalarm_save.pending = tmp.pending;
++                      rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
++              }
++              cm_suspended = false;
++              cm_rtc_set = false;
++      }
++
++      uevent_notify(cm, NULL);
++}
++
++static const struct dev_pm_ops charger_manager_pm = {
++      .prepare        = cm_suspend_prepare,
++      .complete       = cm_suspend_complete,
++};
++
++static struct platform_driver charger_manager_driver = {
++      .driver = {
++              .name = "charger-manager",
++              .owner = THIS_MODULE,
++              .pm = &charger_manager_pm,
++      },
++      .probe = charger_manager_probe,
++      .remove = __devexit_p(charger_manager_remove),
++      .id_table = charger_manager_id,
++};
++
++static int __init charger_manager_init(void)
++{
++      return platform_driver_register(&charger_manager_driver);
++}
++late_initcall(charger_manager_init);
++
++static void __exit charger_manager_cleanup(void)
++{
++      platform_driver_unregister(&charger_manager_driver);
++}
++module_exit(charger_manager_cleanup);
++
++MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
++MODULE_DESCRIPTION("Charger Manager");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("charger-manager");
+diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
+new file mode 100644
+index 0000000..102c5b3
+--- /dev/null
++++ b/include/linux/power/charger-manager.h
+@@ -0,0 +1,130 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
++ * MyungJoo.Ham <myungjoo.ham@samsung.com>
++ *
++ * Charger Manager.
++ * This framework enables to control and multiple chargers and to
++ * monitor charging even in the context of suspend-to-RAM with
++ * an interface combining the chargers.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++**/
++
++#ifndef _CHARGER_MANAGER_H
++#define _CHARGER_MANAGER_H
++
++#include <linux/power_supply.h>
++
++enum data_source {
++      CM_FUEL_GAUGE,
++      CM_CHARGER_STAT,
++};
++
++enum polling_modes {
++      CM_POLL_DISABLE = 0,
++      CM_POLL_ALWAYS,
++      CM_POLL_EXTERNAL_POWER_ONLY,
++      CM_POLL_CHARGING_ONLY,
++};
++
++/**
++ * struct charger_global_desc
++ * @rtc_name: the name of RTC used to wake up the system from suspend.
++ * @rtc_only_wakeup:
++ *    If the system is woken up by waekup-sources other than the RTC or
++ *    callbacks, Charger Manager should recognize with
++ *    rtc_only_wakeup() returning false.
++ *    If the RTC given to CM is the only wakeup reason,
++ *    rtc_only_wakeup should return true.
++ */
++struct charger_global_desc {
++      char *rtc_name;
++
++      bool (*rtc_only_wakeup)(void);
++};
++
++/**
++ * struct charger_desc
++ * @polling_mode:
++ *    Determine which polling mode will be used
++ * @polling_interval_ms: interval in millisecond at which
++ *    charger manager will monitor battery health
++ * @battery_present:
++ *    Specify where information for existance of battery can be obtained
++ * @psy_charger_stat: the names of power-supply for chargers
++ * @num_charger_regulator: the number of entries in charger_regulators
++ * @charger_regulators: array of regulator_bulk_data for chargers
++ * @psy_fuel_gauge: the name of power-supply for fuel gauge
++ * @temperature_out_of_range:
++ *    Determine whether the status is overheat or cold or normal.
++ *    return_value > 0: overheat
++ *    return_value == 0: normal
++ *    return_value < 0: cold
++ */
++struct charger_desc {
++      enum polling_modes polling_mode;
++      unsigned int polling_interval_ms;
++
++      enum data_source battery_present;
++
++      char **psy_charger_stat;
++
++      int num_charger_regulators;
++      struct regulator_bulk_data *charger_regulators;
++
++      char *psy_fuel_gauge;
++
++      int (*temperature_out_of_range)(int *mC);
++};
++
++#define PSY_NAME_MAX  30
++
++/**
++ * struct charger_manager
++ * @entry: entry for list
++ * @dev: device pointer
++ * @desc: instance of charger_desc
++ * @fuel_gauge: power_supply for fuel gauge
++ * @charger_stat: array of power_supply for chargers
++ * @charger_enabled: the state of charger
++ * @emergency_stop:
++ *    When setting true, stop charging
++ * @last_temp_mC: the measured temperature in milli-Celsius
++ * @status_save_ext_pwr_inserted:
++ *    saved status of external power before entering suspend-to-RAM
++ * @status_save_batt:
++ *    saved status of battery before entering suspend-to-RAM
++ */
++struct charger_manager {
++      struct list_head entry;
++      struct device *dev;
++      struct charger_desc *desc;
++
++      struct power_supply *fuel_gauge;
++      struct power_supply **charger_stat;
++
++      bool charger_enabled;
++
++      int emergency_stop;
++      int last_temp_mC;
++
++      bool status_save_ext_pwr_inserted;
++      bool status_save_batt;
++};
++
++#ifdef CONFIG_CHARGER_MANAGER
++extern int setup_charger_manager(struct charger_global_desc *gd);
++extern bool cm_suspend_again(void);
++#else
++static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd)
++{ }
++
++static bool __maybe_unused cm_suspend_again(void)
++{
++      return false;
++}
++#endif
++
++#endif /* _CHARGER_MANAGER_H */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0164-PM-Hibernate-Implement-compat_ioctl-for-dev-snapshot.patch b/patches.runtime_pm/0164-PM-Hibernate-Implement-compat_ioctl-for-dev-snapshot.patch
new file mode 100644 (file)
index 0000000..11b5e74
--- /dev/null
@@ -0,0 +1,110 @@
+From e43993ecd9db0643ab3b09d3f228a15dfe4107d4 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Tue, 27 Dec 2011 22:54:52 +0100
+Subject: PM / Hibernate: Implement compat_ioctl for /dev/snapshot
+
+This allows uswsusp built for i386 to run on an x86_64 kernel (tested
+with Debian package version 1.0+20110509-2).
+
+References: http://bugs.debian.org/502816
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c336078bf65c4d38caa9a4b8b7b7261c778e622c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/user.c |   64 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 64 insertions(+)
+
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 78bdb44..6b1ab7a 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -21,6 +21,7 @@
+ #include <linux/swapops.h>
+ #include <linux/pm.h>
+ #include <linux/fs.h>
++#include <linux/compat.h>
+ #include <linux/console.h>
+ #include <linux/cpu.h>
+ #include <linux/freezer.h>
+@@ -380,6 +381,66 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+       return error;
+ }
++#ifdef CONFIG_COMPAT
++
++struct compat_resume_swap_area {
++      compat_loff_t offset;
++      u32 dev;
++} __packed;
++
++static long
++snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++      BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
++
++      switch (cmd) {
++      case SNAPSHOT_GET_IMAGE_SIZE:
++      case SNAPSHOT_AVAIL_SWAP_SIZE:
++      case SNAPSHOT_ALLOC_SWAP_PAGE: {
++              compat_loff_t __user *uoffset = compat_ptr(arg);
++              loff_t offset;
++              mm_segment_t old_fs;
++              int err;
++
++              old_fs = get_fs();
++              set_fs(KERNEL_DS);
++              err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
++              set_fs(old_fs);
++              if (!err && put_user(offset, uoffset))
++                      err = -EFAULT;
++              return err;
++      }
++
++      case SNAPSHOT_CREATE_IMAGE:
++              return snapshot_ioctl(file, cmd,
++                                    (unsigned long) compat_ptr(arg));
++
++      case SNAPSHOT_SET_SWAP_AREA: {
++              struct compat_resume_swap_area __user *u_swap_area =
++                      compat_ptr(arg);
++              struct resume_swap_area swap_area;
++              mm_segment_t old_fs;
++              int err;
++
++              err = get_user(swap_area.offset, &u_swap_area->offset);
++              err |= get_user(swap_area.dev, &u_swap_area->dev);
++              if (err)
++                      return -EFAULT;
++              old_fs = get_fs();
++              set_fs(KERNEL_DS);
++              err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
++                                   (unsigned long) &swap_area);
++              set_fs(old_fs);
++              return err;
++      }
++
++      default:
++              return snapshot_ioctl(file, cmd, arg);
++      }
++}
++
++#endif /* CONFIG_COMPAT */
++
+ static const struct file_operations snapshot_fops = {
+       .open = snapshot_open,
+       .release = snapshot_release,
+@@ -387,6 +448,9 @@ static const struct file_operations snapshot_fops = {
+       .write = snapshot_write,
+       .llseek = no_llseek,
+       .unlocked_ioctl = snapshot_ioctl,
++#ifdef CONFIG_COMPAT
++      .compat_ioctl = snapshot_compat_ioctl,
++#endif
+ };
+ static struct miscdevice snapshot_device = {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0165-mm-more-intensive-memory-corruption-debugging.patch b/patches.runtime_pm/0165-mm-more-intensive-memory-corruption-debugging.patch
new file mode 100644 (file)
index 0000000..d0fa66c
--- /dev/null
@@ -0,0 +1,266 @@
+From b836013360f4a4faf35861534bba54fe7dc1538f Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 10 Jan 2012 15:07:28 -0800
+Subject: mm: more intensive memory corruption debugging
+
+With CONFIG_DEBUG_PAGEALLOC configured, the CPU will generate an exception
+on access (read,write) to an unallocated page, which permits us to catch
+code which corrupts memory.  However the kernel is trying to maximise
+memory usage, hence there are usually few free pages in the system and
+buggy code usually corrupts some crucial data.
+
+This patch changes the buddy allocator to keep more free/protected pages
+and to interlace free/protected and allocated pages to increase the
+probability of catching corruption.
+
+When the kernel is compiled with CONFIG_DEBUG_PAGEALLOC,
+debug_guardpage_minorder defines the minimum order used by the page
+allocator to grant a request.  The requested size will be returned with
+the remaining pages used as guard pages.
+
+The default value of debug_guardpage_minorder is zero: no change from
+current behaviour.
+
+[akpm@linux-foundation.org: tweak documentation, s/flg/flag/]
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
+Cc: Christoph Lameter <cl@linux-foundation.org>
+Cc: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit c0a32fc5a2e470d0b02597b23ad79a317735253e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/kernel-parameters.txt |   19 +++++++++
+ include/linux/mm.h                  |   17 ++++++++
+ include/linux/page-debug-flags.h    |    4 +-
+ mm/Kconfig.debug                    |    5 +++
+ mm/page_alloc.c                     |   75 ++++++++++++++++++++++++++++++++---
+ 5 files changed, 113 insertions(+), 7 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 09a5f8a..400b7be 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -599,6 +599,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+       no_debug_objects
+                       [KNL] Disable object debugging
++      debug_guardpage_minorder=
++                      [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
++                      parameter allows control of the order of pages that will
++                      be intentionally kept free (and hence protected) by the
++                      buddy allocator. Bigger value increase the probability
++                      of catching random memory corruption, but reduce the
++                      amount of memory for normal system use. The maximum
++                      possible value is MAX_ORDER/2.  Setting this parameter
++                      to 1 or 2 should be enough to identify most random
++                      memory corruption problems caused by bugs in kernel or
++                      driver code when a CPU writes to (or reads from) a
++                      random memory location. Note that there exists a class
++                      of memory corruptions problems caused by buggy H/W or
++                      F/W or by drivers badly programing DMA (basically when
++                      memory is written at bus level and the CPU MMU is
++                      bypassed) which are not detectable by
++                      CONFIG_DEBUG_PAGEALLOC, hence this option will not help
++                      tracking down these problems.
++
+       debugpat        [X86] Enable PAT debugging
+       decnet.addr=    [HW,NET]
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 18eea05..988b049 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1660,5 +1660,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
+                               unsigned int pages_per_huge_page);
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
++#ifdef CONFIG_DEBUG_PAGEALLOC
++extern unsigned int _debug_guardpage_minorder;
++
++static inline unsigned int debug_guardpage_minorder(void)
++{
++      return _debug_guardpage_minorder;
++}
++
++static inline bool page_is_guard(struct page *page)
++{
++      return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
++}
++#else
++static inline unsigned int debug_guardpage_minorder(void) { return 0; }
++static inline bool page_is_guard(struct page *page) { return false; }
++#endif /* CONFIG_DEBUG_PAGEALLOC */
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
+index b0638fd..22691f61 100644
+--- a/include/linux/page-debug-flags.h
++++ b/include/linux/page-debug-flags.h
+@@ -13,6 +13,7 @@
+ enum page_debug_flags {
+       PAGE_DEBUG_FLAG_POISON,         /* Page is poisoned */
++      PAGE_DEBUG_FLAG_GUARD,
+ };
+ /*
+@@ -21,7 +22,8 @@ enum page_debug_flags {
+  */
+ #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
+-#if !defined(CONFIG_PAGE_POISONING) \
++#if !defined(CONFIG_PAGE_POISONING) && \
++    !defined(CONFIG_PAGE_GUARD) \
+ /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
+ #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
+ #endif
+diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
+index 8b1a477..4b24432 100644
+--- a/mm/Kconfig.debug
++++ b/mm/Kconfig.debug
+@@ -4,6 +4,7 @@ config DEBUG_PAGEALLOC
+       depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
+       depends on !KMEMCHECK
+       select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
++      select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
+       ---help---
+         Unmap pages from the kernel linear mapping after free_pages().
+         This results in a large slowdown, but helps to find certain types
+@@ -22,3 +23,7 @@ config WANT_PAGE_DEBUG_FLAGS
+ config PAGE_POISONING
+       bool
+       select WANT_PAGE_DEBUG_FLAGS
++
++config PAGE_GUARD
++      bool
++      select WANT_PAGE_DEBUG_FLAGS
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 947a7e9..b9995d6 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/memcontrol.h>
+ #include <linux/prefetch.h>
++#include <linux/page-debug-flags.h>
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -402,6 +403,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
+               clear_highpage(page + i);
+ }
++#ifdef CONFIG_DEBUG_PAGEALLOC
++unsigned int _debug_guardpage_minorder;
++
++static int __init debug_guardpage_minorder_setup(char *buf)
++{
++      unsigned long res;
++
++      if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
++              printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
++              return 0;
++      }
++      _debug_guardpage_minorder = res;
++      printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
++      return 0;
++}
++__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
++
++static inline void set_page_guard_flag(struct page *page)
++{
++      __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
++}
++
++static inline void clear_page_guard_flag(struct page *page)
++{
++      __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
++}
++#else
++static inline void set_page_guard_flag(struct page *page) { }
++static inline void clear_page_guard_flag(struct page *page) { }
++#endif
++
+ static inline void set_page_order(struct page *page, int order)
+ {
+       set_page_private(page, order);
+@@ -459,6 +491,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
+       if (page_zone_id(page) != page_zone_id(buddy))
+               return 0;
++      if (page_is_guard(buddy) && page_order(buddy) == order) {
++              VM_BUG_ON(page_count(buddy) != 0);
++              return 1;
++      }
++
+       if (PageBuddy(buddy) && page_order(buddy) == order) {
+               VM_BUG_ON(page_count(buddy) != 0);
+               return 1;
+@@ -515,11 +552,19 @@ static inline void __free_one_page(struct page *page,
+               buddy = page + (buddy_idx - page_idx);
+               if (!page_is_buddy(page, buddy, order))
+                       break;
+-
+-              /* Our buddy is free, merge with it and move up one order. */
+-              list_del(&buddy->lru);
+-              zone->free_area[order].nr_free--;
+-              rmv_page_order(buddy);
++              /*
++               * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
++               * merge with it and move up one order.
++               */
++              if (page_is_guard(buddy)) {
++                      clear_page_guard_flag(buddy);
++                      set_page_private(page, 0);
++                      __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
++              } else {
++                      list_del(&buddy->lru);
++                      zone->free_area[order].nr_free--;
++                      rmv_page_order(buddy);
++              }
+               combined_idx = buddy_idx & page_idx;
+               page = page + (combined_idx - page_idx);
+               page_idx = combined_idx;
+@@ -745,6 +790,23 @@ static inline void expand(struct zone *zone, struct page *page,
+               high--;
+               size >>= 1;
+               VM_BUG_ON(bad_range(zone, &page[size]));
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++              if (high < debug_guardpage_minorder()) {
++                      /*
++                       * Mark as guard pages (or page), that will allow to
++                       * merge back to allocator when buddy will be freed.
++                       * Corresponding page table entries will not be touched,
++                       * pages will stay not present in virtual address space
++                       */
++                      INIT_LIST_HEAD(&page[size].lru);
++                      set_page_guard_flag(&page[size]);
++                      set_page_private(&page[size], high);
++                      /* Guard pages are not available for any usage */
++                      __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
++                      continue;
++              }
++#endif
+               list_add(&page[size].lru, &area->free_list[migratetype]);
+               area->nr_free++;
+               set_page_order(&page[size], high);
+@@ -1774,7 +1836,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+       va_list args;
+       unsigned int filter = SHOW_MEM_FILTER_NODES;
+-      if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
++      if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
++          debug_guardpage_minorder() > 0)
+               return;
+       /*
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0166-PM-Hibernate-do-not-count-debug-pages-as-savable.patch b/patches.runtime_pm/0166-PM-Hibernate-do-not-count-debug-pages-as-savable.patch
new file mode 100644 (file)
index 0000000..fffd5ac
--- /dev/null
@@ -0,0 +1,56 @@
+From e422e7ec788f488a7815d3cbb23b88cba9735691 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 10 Jan 2012 15:07:31 -0800
+Subject: PM/Hibernate: do not count debug pages as savable
+
+When debugging with CONFIG_DEBUG_PAGEALLOC and debug_guardpage_minorder >
+0, we have lot of free pages that are not marked so.  Snapshot code
+account them as savable, what cause hibernate memory preallocation
+failure.
+
+It is pretty hard to make hibernate allocation succeed with
+debug_guardpage_minorder=1.  This change at least make it possible when
+system has relatively big amount of RAM.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Christoph Lameter <cl@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Pekka Enberg <penberg@cs.helsinki.fi>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit c6968e73b90c2a2fb9a32d4bad249f8f70f70125)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/snapshot.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index cbe2c14..1cf8890 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -858,6 +858,9 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
+           PageReserved(page))
+               return NULL;
++      if (page_is_guard(page))
++              return NULL;
++
+       return page;
+ }
+@@ -920,6 +923,9 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
+           && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
+               return NULL;
++      if (page_is_guard(page))
++              return NULL;
++
+       return page;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0167-power_supply-Charger-Manager-Add-properties-for-powe.patch b/patches.runtime_pm/0167-power_supply-Charger-Manager-Add-properties-for-powe.patch
new file mode 100644 (file)
index 0000000..815f3f2
--- /dev/null
@@ -0,0 +1,475 @@
+From e3ac50aa08a8a926883d7d319abbe1924f0c0ddf Mon Sep 17 00:00:00 2001
+From: Donggeun Kim <dg77.kim@samsung.com>
+Date: Tue, 27 Dec 2011 18:47:49 +0900
+Subject: power_supply: Charger-Manager: Add properties for power-supply-class
+
+Charger Manager provides power-supply-class aggregating
+information from multiple chargers and a fuel-gauge.
+
+Signed-off-by: Donggeun Kim <dg77.kim@samsung.com>
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Signed-off-by: Anton Vorontsov <cbouatmailru@gmail.com>
+(cherry picked from commit ad3d13eee78ec44194bf919a37e2f711e53cbdf0)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/charger-manager.txt |   14 ++
+ drivers/power/charger-manager.c         |  295 ++++++++++++++++++++++++++++++-
+ include/linux/power/charger-manager.h   |   17 ++
+ 3 files changed, 325 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/power/charger-manager.txt b/Documentation/power/charger-manager.txt
+index 081489f..fdcca99 100644
+--- a/Documentation/power/charger-manager.txt
++++ b/Documentation/power/charger-manager.txt
+@@ -98,6 +98,11 @@ battery), an instance of Charger Manager is attached to it.
+ struct charger_desc {
++char *psy_name;
++      : The power-supply-class name of the battery. Default is
++      "battery" if psy_name is NULL. Users can access the psy entries
++      at "/sys/class/power_supply/[psy_name]/".
++
+ enum polling_modes polling_mode;
+       : CM_POLL_DISABLE: do not poll this battery.
+         CM_POLL_ALWAYS: always poll this battery.
+@@ -106,6 +111,12 @@ enum polling_modes polling_mode;
+         CM_POLL_CHARGING_ONLY: poll this battery if and only if the
+                                battery is being charged.
++unsigned int fullbatt_uV;
++      : If specified with a non-zero value, Charger Manager assumes
++      that the battery is full (capacity = 100) if the battery is not being
++      charged and the battery voltage is equal to or greater than
++      fullbatt_uV.
++
+ unsigned int polling_interval_ms;
+       : Required polling interval in ms. Charger Manager will poll
+       this battery every polling_interval_ms or more frequently.
+@@ -131,10 +142,13 @@ char *psy_fuel_gauge;
+       : Power-supply-class name of the fuel gauge.
+ int (*temperature_out_of_range)(int *mC);
++bool measure_battery_temp;
+       : This callback returns 0 if the temperature is safe for charging,
+       a positive number if it is too hot to charge, and a negative number
+       if it is too cold to charge. With the variable mC, the callback returns
+       the temperature in 1/1000 of centigrade.
++      The source of temperature can be battery or ambient one according to
++      the value of measure_battery_temp.
+ };
+ 5. Other Considerations
+diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
+index 727a259..0378d01 100644
+--- a/drivers/power/charger-manager.c
++++ b/drivers/power/charger-manager.c
+@@ -122,6 +122,32 @@ static bool is_ext_pwr_online(struct charger_manager *cm)
+ }
+ /**
++ * get_batt_uV - Get the voltage level of the battery
++ * @cm: the Charger Manager representing the battery.
++ * @uV: the voltage level returned.
++ *
++ * Returns 0 if there is no error.
++ * Returns a negative value on error.
++ */
++static int get_batt_uV(struct charger_manager *cm, int *uV)
++{
++      union power_supply_propval val;
++      int ret;
++
++      if (cm->fuel_gauge)
++              ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
++                              POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
++      else
++              return -ENODEV;
++
++      if (ret)
++              return ret;
++
++      *uV = val.intval;
++      return 0;
++}
++
++/**
+  * is_charging - Returns true if the battery is being charged.
+  * @cm: the Charger Manager representing the battery.
+  */
+@@ -369,6 +395,208 @@ static bool cm_monitor(void)
+       return stop;
+ }
++static int charger_get_property(struct power_supply *psy,
++              enum power_supply_property psp,
++              union power_supply_propval *val)
++{
++      struct charger_manager *cm = container_of(psy,
++                      struct charger_manager, charger_psy);
++      struct charger_desc *desc = cm->desc;
++      int i, ret = 0, uV;
++
++      switch (psp) {
++      case POWER_SUPPLY_PROP_STATUS:
++              if (is_charging(cm))
++                      val->intval = POWER_SUPPLY_STATUS_CHARGING;
++              else if (is_ext_pwr_online(cm))
++                      val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
++              else
++                      val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
++              break;
++      case POWER_SUPPLY_PROP_HEALTH:
++              if (cm->emergency_stop > 0)
++                      val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
++              else if (cm->emergency_stop < 0)
++                      val->intval = POWER_SUPPLY_HEALTH_COLD;
++              else
++                      val->intval = POWER_SUPPLY_HEALTH_GOOD;
++              break;
++      case POWER_SUPPLY_PROP_PRESENT:
++              if (is_batt_present(cm))
++                      val->intval = 1;
++              else
++                      val->intval = 0;
++              break;
++      case POWER_SUPPLY_PROP_VOLTAGE_NOW:
++              ret = get_batt_uV(cm, &i);
++              val->intval = i;
++              break;
++      case POWER_SUPPLY_PROP_CURRENT_NOW:
++              ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
++                              POWER_SUPPLY_PROP_CURRENT_NOW, val);
++              break;
++      case POWER_SUPPLY_PROP_TEMP:
++              /* in thenth of centigrade */
++              if (cm->last_temp_mC == INT_MIN)
++                      desc->temperature_out_of_range(&cm->last_temp_mC);
++              val->intval = cm->last_temp_mC / 100;
++              if (!desc->measure_battery_temp)
++                      ret = -ENODEV;
++              break;
++      case POWER_SUPPLY_PROP_TEMP_AMBIENT:
++              /* in thenth of centigrade */
++              if (cm->last_temp_mC == INT_MIN)
++                      desc->temperature_out_of_range(&cm->last_temp_mC);
++              val->intval = cm->last_temp_mC / 100;
++              if (desc->measure_battery_temp)
++                      ret = -ENODEV;
++              break;
++      case POWER_SUPPLY_PROP_CAPACITY:
++              if (!cm->fuel_gauge) {
++                      ret = -ENODEV;
++                      break;
++              }
++
++              if (!is_batt_present(cm)) {
++                      /* There is no battery. Assume 100% */
++                      val->intval = 100;
++                      break;
++              }
++
++              ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
++                                      POWER_SUPPLY_PROP_CAPACITY, val);
++              if (ret)
++                      break;
++
++              if (val->intval > 100) {
++                      val->intval = 100;
++                      break;
++              }
++              if (val->intval < 0)
++                      val->intval = 0;
++
++              /* Do not adjust SOC when charging: voltage is overrated */
++              if (is_charging(cm))
++                      break;
++
++              /*
++               * If the capacity value is inconsistent, calibrate it base on
++               * the battery voltage values and the thresholds given as desc
++               */
++              ret = get_batt_uV(cm, &uV);
++              if (ret) {
++                      /* Voltage information not available. No calibration */
++                      ret = 0;
++                      break;
++              }
++
++              if (desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV &&
++                  !is_charging(cm)) {
++                      val->intval = 100;
++                      break;
++              }
++
++              break;
++      case POWER_SUPPLY_PROP_ONLINE:
++              if (is_ext_pwr_online(cm))
++                      val->intval = 1;
++              else
++                      val->intval = 0;
++              break;
++      case POWER_SUPPLY_PROP_CHARGE_FULL:
++              if (cm->fuel_gauge) {
++                      if (cm->fuel_gauge->get_property(cm->fuel_gauge,
++                          POWER_SUPPLY_PROP_CHARGE_FULL, val) == 0)
++                              break;
++              }
++
++              if (is_ext_pwr_online(cm)) {
++                      /* Not full if it's charging. */
++                      if (is_charging(cm)) {
++                              val->intval = 0;
++                              break;
++                      }
++                      /*
++                       * Full if it's powered but not charging andi
++                       * not forced stop by emergency
++                       */
++                      if (!cm->emergency_stop) {
++                              val->intval = 1;
++                              break;
++                      }
++              }
++
++              /* Full if it's over the fullbatt voltage */
++              ret = get_batt_uV(cm, &uV);
++              if (!ret && desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV &&
++                  !is_charging(cm)) {
++                      val->intval = 1;
++                      break;
++              }
++
++              /* Full if the cap is 100 */
++              if (cm->fuel_gauge) {
++                      ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
++                                      POWER_SUPPLY_PROP_CAPACITY, val);
++                      if (!ret && val->intval >= 100 && !is_charging(cm)) {
++                              val->intval = 1;
++                              break;
++                      }
++              }
++
++              val->intval = 0;
++              ret = 0;
++              break;
++      case POWER_SUPPLY_PROP_CHARGE_NOW:
++              if (is_charging(cm)) {
++                      ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
++                                              POWER_SUPPLY_PROP_CHARGE_NOW,
++                                              val);
++                      if (ret) {
++                              val->intval = 1;
++                              ret = 0;
++                      } else {
++                              /* If CHARGE_NOW is supplied, use it */
++                              val->intval = (val->intval > 0) ?
++                                              val->intval : 1;
++                      }
++              } else {
++                      val->intval = 0;
++              }
++              break;
++      default:
++              return -EINVAL;
++      }
++      return ret;
++}
++
++#define NUM_CHARGER_PSY_OPTIONAL      (4)
++static enum power_supply_property default_charger_props[] = {
++      /* Guaranteed to provide */
++      POWER_SUPPLY_PROP_STATUS,
++      POWER_SUPPLY_PROP_HEALTH,
++      POWER_SUPPLY_PROP_PRESENT,
++      POWER_SUPPLY_PROP_VOLTAGE_NOW,
++      POWER_SUPPLY_PROP_CAPACITY,
++      POWER_SUPPLY_PROP_ONLINE,
++      POWER_SUPPLY_PROP_CHARGE_FULL,
++      /*
++       * Optional properties are:
++       * POWER_SUPPLY_PROP_CHARGE_NOW,
++       * POWER_SUPPLY_PROP_CURRENT_NOW,
++       * POWER_SUPPLY_PROP_TEMP, and
++       * POWER_SUPPLY_PROP_TEMP_AMBIENT,
++       */
++};
++
++static struct power_supply psy_default = {
++      .name = "battery",
++      .type = POWER_SUPPLY_TYPE_BATTERY,
++      .properties = default_charger_props,
++      .num_properties = ARRAY_SIZE(default_charger_props),
++      .get_property = charger_get_property,
++};
++
+ /**
+  * cm_setup_timer - For in-suspend monitoring setup wakeup alarm
+  *                for suspend_again.
+@@ -532,6 +760,7 @@ static int charger_manager_probe(struct platform_device *pdev)
+       struct charger_desc *desc = dev_get_platdata(&pdev->dev);
+       struct charger_manager *cm;
+       int ret = 0, i = 0;
++      union power_supply_propval val;
+       if (g_desc && !rtc_dev && g_desc->rtc_name) {
+               rtc_dev = rtc_class_open(g_desc->rtc_name);
+@@ -626,11 +855,68 @@ static int charger_manager_probe(struct platform_device *pdev)
+       platform_set_drvdata(pdev, cm);
++      memcpy(&cm->charger_psy, &psy_default,
++                              sizeof(psy_default));
++      if (!desc->psy_name) {
++              strncpy(cm->psy_name_buf, psy_default.name,
++                              PSY_NAME_MAX);
++      } else {
++              strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
++      }
++      cm->charger_psy.name = cm->psy_name_buf;
++
++      /* Allocate for psy properties because they may vary */
++      cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property)
++                              * (ARRAY_SIZE(default_charger_props) +
++                              NUM_CHARGER_PSY_OPTIONAL),
++                              GFP_KERNEL);
++      if (!cm->charger_psy.properties) {
++              dev_err(&pdev->dev, "Cannot allocate for psy properties.\n");
++              ret = -ENOMEM;
++              goto err_chg_stat;
++      }
++      memcpy(cm->charger_psy.properties, default_charger_props,
++              sizeof(enum power_supply_property) *
++              ARRAY_SIZE(default_charger_props));
++      cm->charger_psy.num_properties = psy_default.num_properties;
++
++      /* Find which optional psy-properties are available */
++      if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
++                                        POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
++              cm->charger_psy.properties[cm->charger_psy.num_properties] =
++                              POWER_SUPPLY_PROP_CHARGE_NOW;
++              cm->charger_psy.num_properties++;
++      }
++      if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
++                                        POWER_SUPPLY_PROP_CURRENT_NOW,
++                                        &val)) {
++              cm->charger_psy.properties[cm->charger_psy.num_properties] =
++                              POWER_SUPPLY_PROP_CURRENT_NOW;
++              cm->charger_psy.num_properties++;
++      }
++      if (!desc->measure_battery_temp) {
++              cm->charger_psy.properties[cm->charger_psy.num_properties] =
++                              POWER_SUPPLY_PROP_TEMP_AMBIENT;
++              cm->charger_psy.num_properties++;
++      }
++      if (desc->measure_battery_temp) {
++              cm->charger_psy.properties[cm->charger_psy.num_properties] =
++                              POWER_SUPPLY_PROP_TEMP;
++              cm->charger_psy.num_properties++;
++      }
++
++      ret = power_supply_register(NULL, &cm->charger_psy);
++      if (ret) {
++              dev_err(&pdev->dev, "Cannot register charger-manager with"
++                              " name \"%s\".\n", cm->charger_psy.name);
++              goto err_register;
++      }
++
+       ret = regulator_bulk_get(&pdev->dev, desc->num_charger_regulators,
+                                desc->charger_regulators);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot get charger regulators.\n");
+-              goto err_chg_stat;
++              goto err_bulk_get;
+       }
+       ret = try_charger_enable(cm, true);
+@@ -650,6 +936,10 @@ err_chg_enable:
+       if (desc->charger_regulators)
+               regulator_bulk_free(desc->num_charger_regulators,
+                                       desc->charger_regulators);
++err_bulk_get:
++      power_supply_unregister(&cm->charger_psy);
++err_register:
++      kfree(cm->charger_psy.properties);
+ err_chg_stat:
+       kfree(cm->charger_stat);
+ err_no_charger_stat:
+@@ -674,6 +964,9 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
+       if (desc->charger_regulators)
+               regulator_bulk_free(desc->num_charger_regulators,
+                                       desc->charger_regulators);
++
++      power_supply_unregister(&cm->charger_psy);
++      kfree(cm->charger_psy.properties);
+       kfree(cm->charger_stat);
+       kfree(cm->desc);
+       kfree(cm);
+diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
+index 102c5b3..4f75e53 100644
+--- a/include/linux/power/charger-manager.h
++++ b/include/linux/power/charger-manager.h
+@@ -47,8 +47,12 @@ struct charger_global_desc {
+ /**
+  * struct charger_desc
++ * @psy_name: the name of power-supply-class for charger manager
+  * @polling_mode:
+  *    Determine which polling mode will be used
++ * @fullbatt_uV: voltage in microvolt
++ *    If it is not being charged and VBATT >= fullbatt_uV,
++ *    it is assumed to be full.
+  * @polling_interval_ms: interval in millisecond at which
+  *    charger manager will monitor battery health
+  * @battery_present:
+@@ -62,11 +66,18 @@ struct charger_global_desc {
+  *    return_value > 0: overheat
+  *    return_value == 0: normal
+  *    return_value < 0: cold
++ * @measure_battery_temp:
++ *    true: measure battery temperature
++ *    false: measure ambient temperature
+  */
+ struct charger_desc {
++      char *psy_name;
++
+       enum polling_modes polling_mode;
+       unsigned int polling_interval_ms;
++      unsigned int fullbatt_uV;
++
+       enum data_source battery_present;
+       char **psy_charger_stat;
+@@ -77,6 +88,7 @@ struct charger_desc {
+       char *psy_fuel_gauge;
+       int (*temperature_out_of_range)(int *mC);
++      bool measure_battery_temp;
+ };
+ #define PSY_NAME_MAX  30
+@@ -92,6 +104,8 @@ struct charger_desc {
+  * @emergency_stop:
+  *    When setting true, stop charging
+  * @last_temp_mC: the measured temperature in milli-Celsius
++ * @psy_name_buf: the name of power-supply-class for charger manager
++ * @charger_psy: power_supply for charger manager
+  * @status_save_ext_pwr_inserted:
+  *    saved status of external power before entering suspend-to-RAM
+  * @status_save_batt:
+@@ -110,6 +124,9 @@ struct charger_manager {
+       int emergency_stop;
+       int last_temp_mC;
++      char psy_name_buf[PSY_NAME_MAX + 1];
++      struct power_supply charger_psy;
++
+       bool status_save_ext_pwr_inserted;
+       bool status_save_batt;
+ };
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0168-PM-Domains-Fix-build-for-CONFIG_PM_SLEEP-unset.patch b/patches.runtime_pm/0168-PM-Domains-Fix-build-for-CONFIG_PM_SLEEP-unset.patch
new file mode 100644 (file)
index 0000000..f5ee60b
--- /dev/null
@@ -0,0 +1,55 @@
+From 04a1d345aba69929681a8e902e398b2816bdf858 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 14 Jan 2012 00:39:25 +0100
+Subject: PM / Domains: Fix build for CONFIG_PM_SLEEP unset
+
+Some callback functions defined in drivers/base/power/domain.c are
+only necessary if CONFIG_PM_SLEEP is set and they call some other
+functions that are only available in that case.  For this reason,
+they should not be compiled at all when CONFIG_PM_SLEEP is not set.
+
+Reported-by: Magnus Damm <damm@opensource.se>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 0f1d6986bae57b6d11e2c9ce5e66b6c6b0e3684d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 92e6a90..978bbf7 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1429,6 +1429,8 @@ static int pm_genpd_default_restore_state(struct device *dev)
+       return 0;
+ }
++#ifdef CONFIG_PM_SLEEP
++
+ /**
+  * pm_genpd_default_suspend - Default "device suspend" for PM domians.
+  * @dev: Device to handle.
+@@ -1517,6 +1519,19 @@ static int pm_genpd_default_thaw(struct device *dev)
+       return cb ? cb(dev) : pm_generic_thaw(dev);
+ }
++#else /* !CONFIG_PM_SLEEP */
++
++#define pm_genpd_default_suspend      NULL
++#define pm_genpd_default_suspend_late NULL
++#define pm_genpd_default_resume_early NULL
++#define pm_genpd_default_resume               NULL
++#define pm_genpd_default_freeze               NULL
++#define pm_genpd_default_freeze_late  NULL
++#define pm_genpd_default_thaw_early   NULL
++#define pm_genpd_default_thaw         NULL
++
++#endif /* !CONFIG_PM_SLEEP */
++
+ /**
+  * pm_genpd_init - Initialize a generic I/O PM domain object.
+  * @genpd: PM domain object to initialize.
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0169-PM-Domains-Skip-governor-functions-for-CONFIG_PM_RUN.patch b/patches.runtime_pm/0169-PM-Domains-Skip-governor-functions-for-CONFIG_PM_RUN.patch
new file mode 100644 (file)
index 0000000..e95e7e0
--- /dev/null
@@ -0,0 +1,69 @@
+From cfff8ac55b042842b4b3bb543f4a5bd64190d04c Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 14 Jan 2012 00:39:36 +0100
+Subject: PM / Domains: Skip governor functions for CONFIG_PM_RUNTIME unset
+
+The governor functions in drivers/base/power/domain_governor.c
+are only used if CONFIG_PM_RUNTIME is set and they refer to data
+structures that are only present in that case.  For this reason,
+they shouldn't be compiled at all when CONFIG_PM_RUNTIME is not set.
+
+Reported-by: Kukjin Kim <kgene.kim@samsung.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e59a8db8d9b7c02e0bbefbeb18a3836288a97b8a)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain_governor.c |   24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
+index 51527ee..66a265b 100644
+--- a/drivers/base/power/domain_governor.c
++++ b/drivers/base/power/domain_governor.c
+@@ -12,6 +12,8 @@
+ #include <linux/pm_qos.h>
+ #include <linux/hrtimer.h>
++#ifdef CONFIG_PM_RUNTIME
++
+ /**
+  * default_stop_ok - Default PM domain governor routine for stopping devices.
+  * @dev: Device to check.
+@@ -137,16 +139,28 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
+       return true;
+ }
+-struct dev_power_governor simple_qos_governor = {
+-      .stop_ok = default_stop_ok,
+-      .power_down_ok = default_power_down_ok,
+-};
+-
+ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+ {
+       return false;
+ }
++#else /* !CONFIG_PM_RUNTIME */
++
++bool default_stop_ok(struct device *dev)
++{
++      return false;
++}
++
++#define default_power_down_ok NULL
++#define always_on_power_down_ok       NULL
++
++#endif /* !CONFIG_PM_RUNTIME */
++
++struct dev_power_governor simple_qos_governor = {
++      .stop_ok = default_stop_ok,
++      .power_down_ok = default_power_down_ok,
++};
++
+ /**
+  * pm_genpd_gov_always_on - A governor implementing an always-on policy
+  */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0170-PM-Documentation-Fix-spelling-mistake-in-basic-pm-de.patch b/patches.runtime_pm/0170-PM-Documentation-Fix-spelling-mistake-in-basic-pm-de.patch
new file mode 100644 (file)
index 0000000..b6e7b12
--- /dev/null
@@ -0,0 +1,30 @@
+From f1cb94feb8a611bd0d4d0bef6fb610b0979fb719 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@st.com>
+Date: Thu, 19 Jan 2012 23:22:38 +0100
+Subject: PM / Documentation: Fix spelling mistake in basic-pm-debugging.txt
+
+Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit f581b63aa1049ac030d6eb6c24e1be1ce2072ae7)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/basic-pm-debugging.txt |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
+index 40a4c65..262acf5 100644
+--- a/Documentation/power/basic-pm-debugging.txt
++++ b/Documentation/power/basic-pm-debugging.txt
+@@ -15,7 +15,7 @@ test at least a couple of times in a row for confidence.  [This is necessary,
+ because some problems only show up on a second attempt at suspending and
+ resuming the system.]  Moreover, hibernating in the "reboot" and "shutdown"
+ modes causes the PM core to skip some platform-related callbacks which on ACPI
+-systems might be necessary to make hibernation work.  Thus, if you machine fails
++systems might be necessary to make hibernation work.  Thus, if your machine fails
+ to hibernate or resume in the "reboot" mode, you should try the "platform" mode:
+ # echo platform > /sys/power/disk
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0171-PM-Documentation-Fix-minor-issue-in-freezing_of_task.patch b/patches.runtime_pm/0171-PM-Documentation-Fix-minor-issue-in-freezing_of_task.patch
new file mode 100644 (file)
index 0000000..3b5f798
--- /dev/null
@@ -0,0 +1,42 @@
+From 7cc00c63b297865362af93050bd1abc40eee595f Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@st.com>
+Date: Thu, 19 Jan 2012 23:22:49 +0100
+Subject: PM / Documentation: Fix minor issue in freezing_of_tasks.txt
+
+In a paragraph, "kernel thread" is mistakenly written as "kernel". Fix this by
+adding thread after word "kernel".
+
+Changes are shown in multiple lines, as they are realigned to 80 col width.
+
+Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 5eb6f9ad96967be4e0da55521a253e11b534bd3f)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/freezing-of-tasks.txt |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
+index 6ccb68f..ebd7490 100644
+--- a/Documentation/power/freezing-of-tasks.txt
++++ b/Documentation/power/freezing-of-tasks.txt
+@@ -120,10 +120,10 @@ So in practice, the 'at all' may become a 'why freeze kernel threads?' and
+ freezing user threads I don't find really objectionable."
+ Still, there are kernel threads that may want to be freezable.  For example, if
+-a kernel that belongs to a device driver accesses the device directly, it in
+-principle needs to know when the device is suspended, so that it doesn't try to
+-access it at that time.  However, if the kernel thread is freezable, it will be
+-frozen before the driver's .suspend() callback is executed and it will be
++a kernel thread that belongs to a device driver accesses the device directly, it
++in principle needs to know when the device is suspended, so that it doesn't try
++to access it at that time.  However, if the kernel thread is freezable, it will
++be frozen before the driver's .suspend() callback is executed and it will be
+ thawed after the driver's .resume() callback has run, so it won't be accessing
+ the device while it's suspended.
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0172-PM-Hibernate-Correct-additional-pages-number-calcula.patch b/patches.runtime_pm/0172-PM-Hibernate-Correct-additional-pages-number-calcula.patch
new file mode 100644 (file)
index 0000000..f800f45
--- /dev/null
@@ -0,0 +1,34 @@
+From bcde79abed47acf85fed2f7a6fb8a30797b88b65 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung.kim@lge.com>
+Date: Thu, 19 Jan 2012 23:23:10 +0100
+Subject: PM / Hibernate: Correct additional pages number calculation
+
+The struct bm_block is allocated by chain_alloc(),
+so it'd better counting it in LINKED_PAGE_DATA_SIZE.
+
+Signed-off-by: Namhyung Kim <namhyung.kim@lge.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 160cb5a97daef0cb894685d84c9d4700bb7cccb4)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/snapshot.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 1cf8890..6a768e5 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -812,7 +812,8 @@ unsigned int snapshot_additional_pages(struct zone *zone)
+       unsigned int res;
+       res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
+-      res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
++      res += DIV_ROUND_UP(res * sizeof(struct bm_block),
++                          LINKED_PAGE_DATA_SIZE);
+       return 2 * res;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0173-PM-Domains-Add-OF-support.patch b/patches.runtime_pm/0173-PM-Domains-Add-OF-support.patch
new file mode 100644 (file)
index 0000000..fccf44f
--- /dev/null
@@ -0,0 +1,124 @@
+From 314661eba783d9fa1cd1a2e10dc37479d432df35 Mon Sep 17 00:00:00 2001
+From: Thomas Abraham <thomas.abraham@linaro.org>
+Date: Fri, 27 Jan 2012 15:22:07 +0900
+Subject: PM / Domains: Add OF support
+
+A device node pointer is added to generic pm domain structure to associate
+the domain with a node in the device tree. The platform code parses the
+device tree to find available nodes representing the generic power domain,
+instantiates the available domains and initializes them by calling
+pm_genpd_init().
+
+Nodes representing the devices include a phandle of the power domain to
+which it belongs. As these devices get instantiated, the driver code
+checkes for availability of a power domain phandle, converts the phandle
+to a device node and uses the new pm_genpd_of_add_device() api to
+associate the device with a power domain.
+
+pm_genpd_of_add_device() runs through its list of registered power domains
+and matches the OF node of the domain with the one specified as the
+parameter. If a match is found, the device is associated with the matched
+domain.
+
+Cc: Rob Herring <rob.herring@calxeda.com>
+Cc: Grant Likely <grant.likely@secretlab.ca>
+Signed-off-by: Thomas Abraham <thomas.abraham@linaro.org>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
+(cherry picked from commit c8aa130b74cc5b112cb2b119d3b477abaaf6e5b2)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   32 ++++++++++++++++++++++++++++++++
+ include/linux/pm_domain.h   |   12 ++++++++++++
+ 2 files changed, 44 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 978bbf7..939109b 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1171,6 +1171,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ }
+ /**
++ * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
++ * @genpd_node: Device tree node pointer representing a PM domain to which the
++ *   the device is added to.
++ * @dev: Device to be added.
++ * @td: Set of PM QoS timing parameters to attach to the device.
++ */
++int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
++                           struct gpd_timing_data *td)
++{
++      struct generic_pm_domain *genpd = NULL, *gpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
++              return -EINVAL;
++
++      mutex_lock(&gpd_list_lock);
++      list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
++              if (gpd->of_node == genpd_node) {
++                      genpd = gpd;
++                      break;
++              }
++      }
++      mutex_unlock(&gpd_list_lock);
++
++      if (!genpd)
++              return -EINVAL;
++
++      return __pm_genpd_add_device(genpd, dev, td);
++}
++
++/**
+  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+  * @genpd: PM domain to remove the device from.
+  * @dev: Device to be removed.
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index a03a0ad..e3ff875 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -11,6 +11,7 @@
+ #include <linux/device.h>
+ #include <linux/err.h>
++#include <linux/of.h>
+ enum gpd_status {
+       GPD_STATE_ACTIVE = 0,   /* PM domain is active */
+@@ -70,6 +71,7 @@ struct generic_pm_domain {
+       s64 break_even_ns;      /* Power break even for the entire domain. */
+       s64 max_off_time_ns;    /* Maximum allowed "suspended" time. */
+       ktime_t power_off_time;
++      struct device_node *of_node; /* Node in device tree */
+ };
+ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+@@ -117,12 +119,22 @@ extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                struct device *dev,
+                                struct gpd_timing_data *td);
++extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
++                                  struct device *dev,
++                                  struct gpd_timing_data *td);
++
+ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+ {
+       return __pm_genpd_add_device(genpd, dev, NULL);
+ }
++static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
++                                       struct device *dev)
++{
++      return __pm_genpd_of_add_device(genpd_node, dev, NULL);
++}
++
+ extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+                                 struct device *dev);
+ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0174-PM-Hibernate-Fix-s2disk-regression-related-to-freezi.patch b/patches.runtime_pm/0174-PM-Hibernate-Fix-s2disk-regression-related-to-freezi.patch
new file mode 100644 (file)
index 0000000..ee53598
--- /dev/null
@@ -0,0 +1,110 @@
+From 7076202f971b113497daf7a80c26cf16e59ef112 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 29 Jan 2012 20:35:52 +0100
+Subject: PM / Hibernate: Fix s2disk regression related to freezing workqueues
+
+Commit 2aede851ddf08666f68ffc17be446420e9d2a056
+
+  PM / Hibernate: Freeze kernel threads after preallocating memory
+
+introduced a mechanism by which kernel threads were frozen after
+the preallocation of hibernate image memory to avoid problems with
+frozen kernel threads not responding to memory freeing requests.
+However, it overlooked the s2disk code path in which the
+SNAPSHOT_CREATE_IMAGE ioctl was run directly after SNAPSHOT_FREE,
+which caused freeze_workqueues_begin() to BUG(), because it saw
+that worqueues had been already frozen.
+
+Although in principle this issue might be addressed by removing
+the relevant BUG_ON() from freeze_workqueues_begin(), that would
+reintroduce the very problem that commit 2aede851ddf08666f68ffc17be4
+attempted to avoid into that particular code path.  For this reason,
+to fix the issue at hand, introduce thaw_kernel_threads() and make
+the SNAPSHOT_FREE ioctl execute it.
+
+Special thanks to Srivatsa S. Bhat for detailed analysis of the
+problem.
+
+Reported-and-tested-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Cc: stable@kernel.org
+(cherry picked from commit 181e9bdef37bfcaa41f3ab6c948a2a0d60a268b5)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/freezer.h |    2 ++
+ kernel/power/process.c  |   19 +++++++++++++++++++
+ kernel/power/user.c     |    9 +++++++++
+ 3 files changed, 30 insertions(+)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index b79db3d..a1e0727 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -44,6 +44,7 @@ extern bool __refrigerator(bool check_kthr_stop);
+ extern int freeze_processes(void);
+ extern int freeze_kernel_threads(void);
+ extern void thaw_processes(void);
++extern void thaw_kernel_threads(void);
+ static inline bool try_to_freeze(void)
+ {
+@@ -164,6 +165,7 @@ static inline bool __refrigerator(bool check_kthr_stop) { return false; }
+ static inline int freeze_processes(void) { return -ENOSYS; }
+ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
+ static inline void thaw_processes(void) {}
++static inline void thaw_kernel_threads(void) {}
+ static inline bool try_to_freeze(void) { return false; }
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 77274c9..eeca003 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -188,3 +188,22 @@ void thaw_processes(void)
+       printk("done.\n");
+ }
++void thaw_kernel_threads(void)
++{
++      struct task_struct *g, *p;
++
++      pm_nosig_freezing = false;
++      printk("Restarting kernel threads ... ");
++
++      thaw_workqueues();
++
++      read_lock(&tasklist_lock);
++      do_each_thread(g, p) {
++              if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
++                      __thaw_task(p);
++      } while_each_thread(g, p);
++      read_unlock(&tasklist_lock);
++
++      schedule();
++      printk("done.\n");
++}
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 6b1ab7a..e5a21a8 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -274,6 +274,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               swsusp_free();
+               memset(&data->handle, 0, sizeof(struct snapshot_handle));
+               data->ready = 0;
++              /*
++               * It is necessary to thaw kernel threads here, because
++               * SNAPSHOT_CREATE_IMAGE may be invoked directly after
++               * SNAPSHOT_FREE.  In that case, if kernel threads were not
++               * thawed, the preallocation of memory carried out by
++               * hibernation_snapshot() might run into problems (i.e. it
++               * might fail or even deadlock).
++               */
++              thaw_kernel_threads();
+               break;
+       case SNAPSHOT_PREF_IMAGE_SIZE:
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0175-PM-Sleep-Introduce-late-suspend-and-early-resume-of-.patch b/patches.runtime_pm/0175-PM-Sleep-Introduce-late-suspend-and-early-resume-of-.patch
new file mode 100644 (file)
index 0000000..99995d4
--- /dev/null
@@ -0,0 +1,979 @@
+From dca3a5c3507d066f750ba2ce7ee8f8f695d75162 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 29 Jan 2012 20:38:29 +0100
+Subject: PM / Sleep: Introduce "late suspend" and "early resume" of devices
+
+The current device suspend/resume phases during system-wide power
+transitions appear to be insufficient for some platforms that want
+to use the same callback routines for saving device states and
+related operations during runtime suspend/resume as well as during
+system suspend/resume.  In principle, they could point their
+.suspend_noirq() and .resume_noirq() to the same callback routines
+as their .runtime_suspend() and .runtime_resume(), respectively,
+but at least some of them require device interrupts to be enabled
+while the code in those routines is running.
+
+It also makes sense to have device suspend-resume callbacks that will
+be executed with runtime PM disabled and with device interrupts
+enabled in case someone needs to run some special code in that
+context during system-wide power transitions.
+
+Apart from this, .suspend_noirq() and .resume_noirq() were introduced
+as a workaround for drivers using shared interrupts and failing to
+prevent their interrupt handlers from accessing suspended hardware.
+It appears to be better not to use them for other porposes, or we may
+have to deal with some serious confusion (which seems to be happening
+already).
+
+For the above reasons, introduce new device suspend/resume phases,
+"late suspend" and "early resume" (and analogously for hibernation)
+whose callback will be executed with runtime PM disabled and with
+device interrupts enabled and whose callback pointers generally may
+point to runtime suspend/resume routines.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+(cherry picked from commit cf579dfb82550e34de7ccf3ef090d8b834ccd3a9)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/devices.txt |   93 ++++++++++-----
+ arch/x86/kernel/apm_32.c        |   11 +-
+ drivers/base/power/main.c       |  247 +++++++++++++++++++++++++++++++++++----
+ drivers/xen/manage.c            |    6 +-
+ include/linux/pm.h              |   43 +++++--
+ include/linux/suspend.h         |    4 +
+ kernel/kexec.c                  |    8 +-
+ kernel/power/hibernate.c        |   24 ++--
+ kernel/power/main.c             |    8 +-
+ kernel/power/suspend.c          |    4 +-
+ 10 files changed, 357 insertions(+), 91 deletions(-)
+
+diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
+index 20af7de..872815c 100644
+--- a/Documentation/power/devices.txt
++++ b/Documentation/power/devices.txt
+@@ -96,6 +96,12 @@ struct dev_pm_ops {
+       int (*thaw)(struct device *dev);
+       int (*poweroff)(struct device *dev);
+       int (*restore)(struct device *dev);
++      int (*suspend_late)(struct device *dev);
++      int (*resume_early)(struct device *dev);
++      int (*freeze_late)(struct device *dev);
++      int (*thaw_early)(struct device *dev);
++      int (*poweroff_late)(struct device *dev);
++      int (*restore_early)(struct device *dev);
+       int (*suspend_noirq)(struct device *dev);
+       int (*resume_noirq)(struct device *dev);
+       int (*freeze_noirq)(struct device *dev);
+@@ -305,7 +311,7 @@ Entering System Suspend
+ -----------------------
+ When the system goes into the standby or memory sleep state, the phases are:
+-              prepare, suspend, suspend_noirq.
++              prepare, suspend, suspend_late, suspend_noirq.
+     1.        The prepare phase is meant to prevent races by preventing new devices
+       from being registered; the PM core would never know that all the
+@@ -324,7 +330,12 @@ When the system goes into the standby or memory sleep state, the phases are:
+       appropriate low-power state, depending on the bus type the device is on,
+       and they may enable wakeup events.
+-    3.        The suspend_noirq phase occurs after IRQ handlers have been disabled,
++    3 For a number of devices it is convenient to split suspend into the
++      "quiesce device" and "save device state" phases, in which cases
++      suspend_late is meant to do the latter.  It is always executed after
++      runtime power management has been disabled for all devices.
++
++    4.        The suspend_noirq phase occurs after IRQ handlers have been disabled,
+       which means that the driver's interrupt handler will not be called while
+       the callback method is running.  The methods should save the values of
+       the device's registers that weren't saved previously and finally put the
+@@ -359,7 +370,7 @@ Leaving System Suspend
+ ----------------------
+ When resuming from standby or memory sleep, the phases are:
+-              resume_noirq, resume, complete.
++              resume_noirq, resume_early, resume, complete.
+     1.        The resume_noirq callback methods should perform any actions needed
+       before the driver's interrupt handlers are invoked.  This generally
+@@ -375,14 +386,18 @@ When resuming from standby or memory sleep, the phases are:
+       device driver's ->pm.resume_noirq() method to perform device-specific
+       actions.
+-    2.        The resume methods should bring the the device back to its operating
++    2.        The resume_early methods should prepare devices for the execution of
++      the resume methods.  This generally involves undoing the actions of the
++      preceding suspend_late phase.
++
++    3 The resume methods should bring the the device back to its operating
+       state, so that it can perform normal I/O.  This generally involves
+       undoing the actions of the suspend phase.
+-    3.        The complete phase uses only a bus callback.  The method should undo the
+-      actions of the prepare phase.  Note, however, that new children may be
+-      registered below the device as soon as the resume callbacks occur; it's
+-      not necessary to wait until the complete phase.
++    4.        The complete phase should undo the actions of the prepare phase.  Note,
++      however, that new children may be registered below the device as soon as
++      the resume callbacks occur; it's not necessary to wait until the
++      complete phase.
+ At the end of these phases, drivers should be as functional as they were before
+ suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
+@@ -429,8 +444,8 @@ an image of the system memory while everything is stable, reactivate all
+ devices (thaw), write the image to permanent storage, and finally shut down the
+ system (poweroff).  The phases used to accomplish this are:
+-      prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete,
+-      prepare, poweroff, poweroff_noirq
++      prepare, freeze, freeze_late, freeze_noirq, thaw_noirq, thaw_early,
++      thaw, complete, prepare, poweroff, poweroff_late, poweroff_noirq
+     1.        The prepare phase is discussed in the "Entering System Suspend" section
+       above.
+@@ -441,7 +456,11 @@ system (poweroff).  The phases used to accomplish this are:
+       save time it's best not to do so.  Also, the device should not be
+       prepared to generate wakeup events.
+-    3.        The freeze_noirq phase is analogous to the suspend_noirq phase discussed
++    3.        The freeze_late phase is analogous to the suspend_late phase described
++      above, except that the device should not be put in a low-power state and
++      should not be allowed to generate wakeup events by it.
++
++    4.        The freeze_noirq phase is analogous to the suspend_noirq phase discussed
+       above, except again that the device should not be put in a low-power
+       state and should not be allowed to generate wakeup events.
+@@ -449,15 +468,19 @@ At this point the system image is created.  All devices should be inactive and
+ the contents of memory should remain undisturbed while this happens, so that the
+ image forms an atomic snapshot of the system state.
+-    4.        The thaw_noirq phase is analogous to the resume_noirq phase discussed
++    5.        The thaw_noirq phase is analogous to the resume_noirq phase discussed
+       above.  The main difference is that its methods can assume the device is
+       in the same state as at the end of the freeze_noirq phase.
+-    5.        The thaw phase is analogous to the resume phase discussed above.  Its
++    6.        The thaw_early phase is analogous to the resume_early phase described
++      above.  Its methods should undo the actions of the preceding
++      freeze_late, if necessary.
++
++    7.        The thaw phase is analogous to the resume phase discussed above.  Its
+       methods should bring the device back to an operating state, so that it
+       can be used for saving the image if necessary.
+-    6.        The complete phase is discussed in the "Leaving System Suspend" section
++    8.        The complete phase is discussed in the "Leaving System Suspend" section
+       above.
+ At this point the system image is saved, and the devices then need to be
+@@ -465,16 +488,19 @@ prepared for the upcoming system shutdown.  This is much like suspending them
+ before putting the system into the standby or memory sleep state, and the phases
+ are similar.
+-    7.        The prepare phase is discussed above.
++    9.        The prepare phase is discussed above.
++
++    10.       The poweroff phase is analogous to the suspend phase.
+-    8.        The poweroff phase is analogous to the suspend phase.
++    11.       The poweroff_late phase is analogous to the suspend_late phase.
+-    9.        The poweroff_noirq phase is analogous to the suspend_noirq phase.
++    12.       The poweroff_noirq phase is analogous to the suspend_noirq phase.
+-The poweroff and poweroff_noirq callbacks should do essentially the same things
+-as the suspend and suspend_noirq callbacks.  The only notable difference is that
+-they need not store the device register values, because the registers should
+-already have been stored during the freeze or freeze_noirq phases.
++The poweroff, poweroff_late and poweroff_noirq callbacks should do essentially
++the same things as the suspend, suspend_late and suspend_noirq callbacks,
++respectively.  The only notable difference is that they need not store the
++device register values, because the registers should already have been stored
++during the freeze, freeze_late or freeze_noirq phases.
+ Leaving Hibernation
+@@ -518,22 +544,25 @@ To achieve this, the image kernel must restore the devices' pre-hibernation
+ functionality.  The operation is much like waking up from the memory sleep
+ state, although it involves different phases:
+-      restore_noirq, restore, complete
++      restore_noirq, restore_early, restore, complete
+     1.        The restore_noirq phase is analogous to the resume_noirq phase.
+-    2.        The restore phase is analogous to the resume phase.
++    2.        The restore_early phase is analogous to the resume_early phase.
++
++    3.        The restore phase is analogous to the resume phase.
+-    3.        The complete phase is discussed above.
++    4.        The complete phase is discussed above.
+-The main difference from resume[_noirq] is that restore[_noirq] must assume the
+-device has been accessed and reconfigured by the boot loader or the boot kernel.
+-Consequently the state of the device may be different from the state remembered
+-from the freeze and freeze_noirq phases.  The device may even need to be reset
+-and completely re-initialized.  In many cases this difference doesn't matter, so
+-the resume[_noirq] and restore[_norq] method pointers can be set to the same
+-routines.  Nevertheless, different callback pointers are used in case there is a
+-situation where it actually matters.
++The main difference from resume[_early|_noirq] is that restore[_early|_noirq]
++must assume the device has been accessed and reconfigured by the boot loader or
++the boot kernel.  Consequently the state of the device may be different from the
++state remembered from the freeze, freeze_late and freeze_noirq phases.  The
++device may even need to be reset and completely re-initialized.  In many cases
++this difference doesn't matter, so the resume[_early|_noirq] and
++restore[_early|_norq] method pointers can be set to the same routines.
++Nevertheless, different callback pointers are used in case there is a situation
++where it actually does matter.
+ Device Power Management Domains
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index 965a766..99f4ffe 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -1236,8 +1236,7 @@ static int suspend(int vetoable)
+       struct apm_user *as;
+       dpm_suspend_start(PMSG_SUSPEND);
+-
+-      dpm_suspend_noirq(PMSG_SUSPEND);
++      dpm_suspend_end(PMSG_SUSPEND);
+       local_irq_disable();
+       syscore_suspend();
+@@ -1261,9 +1260,9 @@ static int suspend(int vetoable)
+       syscore_resume();
+       local_irq_enable();
+-      dpm_resume_noirq(PMSG_RESUME);
+-
++      dpm_resume_start(PMSG_RESUME);
+       dpm_resume_end(PMSG_RESUME);
++
+       queue_event(APM_NORMAL_RESUME, NULL);
+       spin_lock(&user_list_lock);
+       for (as = user_list; as != NULL; as = as->next) {
+@@ -1279,7 +1278,7 @@ static void standby(void)
+ {
+       int err;
+-      dpm_suspend_noirq(PMSG_SUSPEND);
++      dpm_suspend_end(PMSG_SUSPEND);
+       local_irq_disable();
+       syscore_suspend();
+@@ -1293,7 +1292,7 @@ static void standby(void)
+       syscore_resume();
+       local_irq_enable();
+-      dpm_resume_noirq(PMSG_RESUME);
++      dpm_resume_start(PMSG_RESUME);
+ }
+ static apm_event_t get_event(void)
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index e2cc3d2..b462c0e 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
+ LIST_HEAD(dpm_list);
+ LIST_HEAD(dpm_prepared_list);
+ LIST_HEAD(dpm_suspended_list);
++LIST_HEAD(dpm_late_early_list);
+ LIST_HEAD(dpm_noirq_list);
+ struct suspend_stats suspend_stats;
+@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
+ }
+ /**
++ * pm_late_early_op - Return the PM operation appropriate for given PM event.
++ * @ops: PM operations to choose from.
++ * @state: PM transition of the system being carried out.
++ *
++ * Runtime PM is disabled for @dev while this function is being executed.
++ */
++static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
++                                    pm_message_t state)
++{
++      switch (state.event) {
++#ifdef CONFIG_SUSPEND
++      case PM_EVENT_SUSPEND:
++              return ops->suspend_late;
++      case PM_EVENT_RESUME:
++              return ops->resume_early;
++#endif /* CONFIG_SUSPEND */
++#ifdef CONFIG_HIBERNATE_CALLBACKS
++      case PM_EVENT_FREEZE:
++      case PM_EVENT_QUIESCE:
++              return ops->freeze_late;
++      case PM_EVENT_HIBERNATE:
++              return ops->poweroff_late;
++      case PM_EVENT_THAW:
++      case PM_EVENT_RECOVER:
++              return ops->thaw_early;
++      case PM_EVENT_RESTORE:
++              return ops->restore_early;
++#endif /* CONFIG_HIBERNATE_CALLBACKS */
++      }
++
++      return NULL;
++}
++
++/**
+  * pm_noirq_op - Return the PM operation appropriate for given PM event.
+  * @ops: PM operations to choose from.
+  * @state: PM transition of the system being carried out.
+@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
+       TRACE_RESUME(0);
+       if (dev->pm_domain) {
+-              info = "EARLY power domain ";
++              info = "noirq power domain ";
+               callback = pm_noirq_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+-              info = "EARLY type ";
++              info = "noirq type ";
+               callback = pm_noirq_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+-              info = "EARLY class ";
++              info = "noirq class ";
+               callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+-              info = "EARLY bus ";
++              info = "noirq bus ";
+               callback = pm_noirq_op(dev->bus->pm, state);
+       }
+       if (!callback && dev->driver && dev->driver->pm) {
+-              info = "EARLY driver ";
++              info = "noirq driver ";
+               callback = pm_noirq_op(dev->driver->pm, state);
+       }
+@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
+ }
+ /**
+- * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
++ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+  * @state: PM transition of the system being carried out.
+  *
+- * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
++ * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
+  * enable device drivers to receive interrupts.
+  */
+-void dpm_resume_noirq(pm_message_t state)
++static void dpm_resume_noirq(pm_message_t state)
+ {
+       ktime_t starttime = ktime_get();
+@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
+               int error;
+               get_device(dev);
+-              list_move_tail(&dev->power.entry, &dpm_suspended_list);
++              list_move_tail(&dev->power.entry, &dpm_late_early_list);
+               mutex_unlock(&dpm_list_mtx);
+               error = device_resume_noirq(dev, state);
+@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
+                       suspend_stats.failed_resume_noirq++;
+                       dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+                       dpm_save_failed_dev(dev_name(dev));
++                      pm_dev_err(dev, state, " noirq", error);
++              }
++
++              mutex_lock(&dpm_list_mtx);
++              put_device(dev);
++      }
++      mutex_unlock(&dpm_list_mtx);
++      dpm_show_time(starttime, state, "noirq");
++      resume_device_irqs();
++}
++
++/**
++ * device_resume_early - Execute an "early resume" callback for given device.
++ * @dev: Device to handle.
++ * @state: PM transition of the system being carried out.
++ *
++ * Runtime PM is disabled for @dev while this function is being executed.
++ */
++static int device_resume_early(struct device *dev, pm_message_t state)
++{
++      pm_callback_t callback = NULL;
++      char *info = NULL;
++      int error = 0;
++
++      TRACE_DEVICE(dev);
++      TRACE_RESUME(0);
++
++      if (dev->pm_domain) {
++              info = "early power domain ";
++              callback = pm_late_early_op(&dev->pm_domain->ops, state);
++      } else if (dev->type && dev->type->pm) {
++              info = "early type ";
++              callback = pm_late_early_op(dev->type->pm, state);
++      } else if (dev->class && dev->class->pm) {
++              info = "early class ";
++              callback = pm_late_early_op(dev->class->pm, state);
++      } else if (dev->bus && dev->bus->pm) {
++              info = "early bus ";
++              callback = pm_late_early_op(dev->bus->pm, state);
++      }
++
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "early driver ";
++              callback = pm_late_early_op(dev->driver->pm, state);
++      }
++
++      error = dpm_run_callback(callback, dev, state, info);
++
++      TRACE_RESUME(error);
++      return error;
++}
++
++/**
++ * dpm_resume_early - Execute "early resume" callbacks for all devices.
++ * @state: PM transition of the system being carried out.
++ */
++static void dpm_resume_early(pm_message_t state)
++{
++      ktime_t starttime = ktime_get();
++
++      mutex_lock(&dpm_list_mtx);
++      while (!list_empty(&dpm_late_early_list)) {
++              struct device *dev = to_device(dpm_late_early_list.next);
++              int error;
++
++              get_device(dev);
++              list_move_tail(&dev->power.entry, &dpm_suspended_list);
++              mutex_unlock(&dpm_list_mtx);
++
++              error = device_resume_early(dev, state);
++              if (error) {
++                      suspend_stats.failed_resume_early++;
++                      dpm_save_failed_step(SUSPEND_RESUME_EARLY);
++                      dpm_save_failed_dev(dev_name(dev));
+                       pm_dev_err(dev, state, " early", error);
+               }
+@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
+       }
+       mutex_unlock(&dpm_list_mtx);
+       dpm_show_time(starttime, state, "early");
+-      resume_device_irqs();
+ }
+-EXPORT_SYMBOL_GPL(dpm_resume_noirq);
++
++/**
++ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
++ * @state: PM transition of the system being carried out.
++ */
++void dpm_resume_start(pm_message_t state)
++{
++      dpm_resume_noirq(state);
++      dpm_resume_early(state);
++}
++EXPORT_SYMBOL_GPL(dpm_resume_start);
+ /**
+  * device_resume - Execute "resume" callbacks for given device.
+@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
+       char *info = NULL;
+       if (dev->pm_domain) {
+-              info = "LATE power domain ";
++              info = "noirq power domain ";
+               callback = pm_noirq_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+-              info = "LATE type ";
++              info = "noirq type ";
+               callback = pm_noirq_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+-              info = "LATE class ";
++              info = "noirq class ";
+               callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+-              info = "LATE bus ";
++              info = "noirq bus ";
+               callback = pm_noirq_op(dev->bus->pm, state);
+       }
+       if (!callback && dev->driver && dev->driver->pm) {
+-              info = "LATE driver ";
++              info = "noirq driver ";
+               callback = pm_noirq_op(dev->driver->pm, state);
+       }
+@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
+ }
+ /**
+- * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
++ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+  * @state: PM transition of the system being carried out.
+  *
+  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
+  * handlers for all non-sysdev devices.
+  */
+-int dpm_suspend_noirq(pm_message_t state)
++static int dpm_suspend_noirq(pm_message_t state)
+ {
+       ktime_t starttime = ktime_get();
+       int error = 0;
+       suspend_device_irqs();
+       mutex_lock(&dpm_list_mtx);
+-      while (!list_empty(&dpm_suspended_list)) {
+-              struct device *dev = to_device(dpm_suspended_list.prev);
++      while (!list_empty(&dpm_late_early_list)) {
++              struct device *dev = to_device(dpm_late_early_list.prev);
+               get_device(dev);
+               mutex_unlock(&dpm_list_mtx);
+@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
+               mutex_lock(&dpm_list_mtx);
+               if (error) {
+-                      pm_dev_err(dev, state, " late", error);
++                      pm_dev_err(dev, state, " noirq", error);
+                       suspend_stats.failed_suspend_noirq++;
+                       dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+                       dpm_save_failed_dev(dev_name(dev));
+@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
+       if (error)
+               dpm_resume_noirq(resume_event(state));
+       else
++              dpm_show_time(starttime, state, "noirq");
++      return error;
++}
++
++/**
++ * device_suspend_late - Execute a "late suspend" callback for given device.
++ * @dev: Device to handle.
++ * @state: PM transition of the system being carried out.
++ *
++ * Runtime PM is disabled for @dev while this function is being executed.
++ */
++static int device_suspend_late(struct device *dev, pm_message_t state)
++{
++      pm_callback_t callback = NULL;
++      char *info = NULL;
++
++      if (dev->pm_domain) {
++              info = "late power domain ";
++              callback = pm_late_early_op(&dev->pm_domain->ops, state);
++      } else if (dev->type && dev->type->pm) {
++              info = "late type ";
++              callback = pm_late_early_op(dev->type->pm, state);
++      } else if (dev->class && dev->class->pm) {
++              info = "late class ";
++              callback = pm_late_early_op(dev->class->pm, state);
++      } else if (dev->bus && dev->bus->pm) {
++              info = "late bus ";
++              callback = pm_late_early_op(dev->bus->pm, state);
++      }
++
++      if (!callback && dev->driver && dev->driver->pm) {
++              info = "late driver ";
++              callback = pm_late_early_op(dev->driver->pm, state);
++      }
++
++      return dpm_run_callback(callback, dev, state, info);
++}
++
++/**
++ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
++ * @state: PM transition of the system being carried out.
++ */
++static int dpm_suspend_late(pm_message_t state)
++{
++      ktime_t starttime = ktime_get();
++      int error = 0;
++
++      mutex_lock(&dpm_list_mtx);
++      while (!list_empty(&dpm_suspended_list)) {
++              struct device *dev = to_device(dpm_suspended_list.prev);
++
++              get_device(dev);
++              mutex_unlock(&dpm_list_mtx);
++
++              error = device_suspend_late(dev, state);
++
++              mutex_lock(&dpm_list_mtx);
++              if (error) {
++                      pm_dev_err(dev, state, " late", error);
++                      suspend_stats.failed_suspend_late++;
++                      dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
++                      dpm_save_failed_dev(dev_name(dev));
++                      put_device(dev);
++                      break;
++              }
++              if (!list_empty(&dev->power.entry))
++                      list_move(&dev->power.entry, &dpm_late_early_list);
++              put_device(dev);
++      }
++      mutex_unlock(&dpm_list_mtx);
++      if (error)
++              dpm_resume_early(resume_event(state));
++      else
+               dpm_show_time(starttime, state, "late");
++
+       return error;
+ }
+-EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
++
++/**
++ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
++ * @state: PM transition of the system being carried out.
++ */
++int dpm_suspend_end(pm_message_t state)
++{
++      int error = dpm_suspend_late(state);
++
++      return error ? : dpm_suspend_noirq(state);
++}
++EXPORT_SYMBOL_GPL(dpm_suspend_end);
+ /**
+  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index 0b5366b..3fce572 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -128,9 +128,9 @@ static void do_suspend(void)
+       printk(KERN_DEBUG "suspending xenstore...\n");
+       xs_suspend();
+-      err = dpm_suspend_noirq(PMSG_FREEZE);
++      err = dpm_suspend_end(PMSG_FREEZE);
+       if (err) {
+-              printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
++              printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
+               goto out_resume;
+       }
+@@ -148,7 +148,7 @@ static void do_suspend(void)
+       err = stop_machine(xen_suspend, &si, cpumask_of(0));
+-      dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
++      dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+       if (err) {
+               printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index e4982ac..c68e1f2 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -110,6 +110,10 @@ typedef struct pm_message {
+  *    Subsystem-level @suspend() is executed for all devices after invoking
+  *    subsystem-level @prepare() for all of them.
+  *
++ * @suspend_late: Continue operations started by @suspend().  For a number of
++ *    devices @suspend_late() may point to the same callback routine as the
++ *    runtime suspend callback.
++ *
+  * @resume: Executed after waking the system up from a sleep state in which the
+  *    contents of main memory were preserved.  The exact action to perform
+  *    depends on the device's subsystem, but generally the driver is expected
+@@ -122,6 +126,10 @@ typedef struct pm_message {
+  *    Subsystem-level @resume() is executed for all devices after invoking
+  *    subsystem-level @resume_noirq() for all of them.
+  *
++ * @resume_early: Prepare to execute @resume().  For a number of devices
++ *    @resume_early() may point to the same callback routine as the runtime
++ *    resume callback.
++ *
+  * @freeze: Hibernation-specific, executed before creating a hibernation image.
+  *    Analogous to @suspend(), but it should not enable the device to signal
+  *    wakeup events or change its power state.  The majority of subsystems
+@@ -131,6 +139,10 @@ typedef struct pm_message {
+  *    Subsystem-level @freeze() is executed for all devices after invoking
+  *    subsystem-level @prepare() for all of them.
+  *
++ * @freeze_late: Continue operations started by @freeze().  Analogous to
++ *    @suspend_late(), but it should not enable the device to signal wakeup
++ *    events or change its power state.
++ *
+  * @thaw: Hibernation-specific, executed after creating a hibernation image OR
+  *    if the creation of an image has failed.  Also executed after a failing
+  *    attempt to restore the contents of main memory from such an image.
+@@ -140,15 +152,23 @@ typedef struct pm_message {
+  *    subsystem-level @thaw_noirq() for all of them.  It also may be executed
+  *    directly after @freeze() in case of a transition error.
+  *
++ * @thaw_early: Prepare to execute @thaw().  Undo the changes made by the
++ *    preceding @freeze_late().
++ *
+  * @poweroff: Hibernation-specific, executed after saving a hibernation image.
+  *    Analogous to @suspend(), but it need not save the device's settings in
+  *    memory.
+  *    Subsystem-level @poweroff() is executed for all devices after invoking
+  *    subsystem-level @prepare() for all of them.
+  *
++ * @poweroff_late: Continue operations started by @poweroff().  Analogous to
++ *    @suspend_late(), but it need not save the device's settings in memory.
++ *
+  * @restore: Hibernation-specific, executed after restoring the contents of main
+  *    memory from a hibernation image, analogous to @resume().
+  *
++ * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
++ *
+  * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
+  *    additional operations required for suspending the device that might be
+  *    racing with its driver's interrupt handler, which is guaranteed not to
+@@ -158,9 +178,10 @@ typedef struct pm_message {
+  *    @suspend_noirq() has returned successfully.  If the device can generate
+  *    system wakeup signals and is enabled to wake up the system, it should be
+  *    configured to do so at that time.  However, depending on the platform
+- *    and device's subsystem, @suspend() may be allowed to put the device into
+- *    the low-power state and configure it to generate wakeup signals, in
+- *    which case it generally is not necessary to define @suspend_noirq().
++ *    and device's subsystem, @suspend() or @suspend_late() may be allowed to
++ *    put the device into the low-power state and configure it to generate
++ *    wakeup signals, in which case it generally is not necessary to define
++ *    @suspend_noirq().
+  *
+  * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+  *    operations required for resuming the device that might be racing with
+@@ -171,9 +192,9 @@ typedef struct pm_message {
+  *    additional operations required for freezing the device that might be
+  *    racing with its driver's interrupt handler, which is guaranteed not to
+  *    run while @freeze_noirq() is being executed.
+- *    The power state of the device should not be changed by either @freeze()
+- *    or @freeze_noirq() and it should not be configured to signal system
+- *    wakeup by any of these callbacks.
++ *    The power state of the device should not be changed by either @freeze(),
++ *    or @freeze_late(), or @freeze_noirq() and it should not be configured to
++ *    signal system wakeup by any of these callbacks.
+  *
+  * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+  *    operations required for thawing the device that might be racing with its
+@@ -249,6 +270,12 @@ struct dev_pm_ops {
+       int (*thaw)(struct device *dev);
+       int (*poweroff)(struct device *dev);
+       int (*restore)(struct device *dev);
++      int (*suspend_late)(struct device *dev);
++      int (*resume_early)(struct device *dev);
++      int (*freeze_late)(struct device *dev);
++      int (*thaw_early)(struct device *dev);
++      int (*poweroff_late)(struct device *dev);
++      int (*restore_early)(struct device *dev);
+       int (*suspend_noirq)(struct device *dev);
+       int (*resume_noirq)(struct device *dev);
+       int (*freeze_noirq)(struct device *dev);
+@@ -584,13 +611,13 @@ struct dev_pm_domain {
+ #ifdef CONFIG_PM_SLEEP
+ extern void device_pm_lock(void);
+-extern void dpm_resume_noirq(pm_message_t state);
++extern void dpm_resume_start(pm_message_t state);
+ extern void dpm_resume_end(pm_message_t state);
+ extern void dpm_resume(pm_message_t state);
+ extern void dpm_complete(pm_message_t state);
+ extern void device_pm_unlock(void);
+-extern int dpm_suspend_noirq(pm_message_t state);
++extern int dpm_suspend_end(pm_message_t state);
+ extern int dpm_suspend_start(pm_message_t state);
+ extern int dpm_suspend(pm_message_t state);
+ extern int dpm_prepare(pm_message_t state);
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index c08069d..43f0421 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -41,8 +41,10 @@ enum suspend_stat_step {
+       SUSPEND_FREEZE = 1,
+       SUSPEND_PREPARE,
+       SUSPEND_SUSPEND,
++      SUSPEND_SUSPEND_LATE,
+       SUSPEND_SUSPEND_NOIRQ,
+       SUSPEND_RESUME_NOIRQ,
++      SUSPEND_RESUME_EARLY,
+       SUSPEND_RESUME
+ };
+@@ -52,8 +54,10 @@ struct suspend_stats {
+       int     failed_freeze;
+       int     failed_prepare;
+       int     failed_suspend;
++      int     failed_suspend_late;
+       int     failed_suspend_noirq;
+       int     failed_resume;
++      int     failed_resume_early;
+       int     failed_resume_noirq;
+ #define       REC_FAILED_NUM  2
+       int     last_failed_dev;
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 581b553..4f86631 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -1518,13 +1518,13 @@ int kernel_kexec(void)
+               if (error)
+                       goto Resume_console;
+               /* At this point, dpm_suspend_start() has been called,
+-               * but *not* dpm_suspend_noirq(). We *must* call
+-               * dpm_suspend_noirq() now.  Otherwise, drivers for
++               * but *not* dpm_suspend_end(). We *must* call
++               * dpm_suspend_end() now.  Otherwise, drivers for
+                * some devices (e.g. interrupt controllers) become
+                * desynchronized with the actual state of the
+                * hardware at resume time, and evil weirdness ensues.
+                */
+-              error = dpm_suspend_noirq(PMSG_FREEZE);
++              error = dpm_suspend_end(PMSG_FREEZE);
+               if (error)
+                       goto Resume_devices;
+               error = disable_nonboot_cpus();
+@@ -1551,7 +1551,7 @@ int kernel_kexec(void)
+               local_irq_enable();
+  Enable_cpus:
+               enable_nonboot_cpus();
+-              dpm_resume_noirq(PMSG_RESTORE);
++              dpm_resume_start(PMSG_RESTORE);
+  Resume_devices:
+               dpm_resume_end(PMSG_RESTORE);
+  Resume_console:
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 7bef755..f3b03d4 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -245,8 +245,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
+  * create_image - Create a hibernation image.
+  * @platform_mode: Whether or not to use the platform driver.
+  *
+- * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
+- * and execute the drivers' .thaw_noirq() callbacks.
++ * Execute device drivers' "late" and "noirq" freeze callbacks, create a
++ * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
+  *
+  * Control reappears in this routine after the subsequent restore.
+  */
+@@ -254,7 +254,7 @@ static int create_image(int platform_mode)
+ {
+       int error;
+-      error = dpm_suspend_noirq(PMSG_FREEZE);
++      error = dpm_suspend_end(PMSG_FREEZE);
+       if (error) {
+               printk(KERN_ERR "PM: Some devices failed to power down, "
+                       "aborting hibernation\n");
+@@ -306,7 +306,7 @@ static int create_image(int platform_mode)
+  Platform_finish:
+       platform_finish(platform_mode);
+-      dpm_resume_noirq(in_suspend ?
++      dpm_resume_start(in_suspend ?
+               (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+       return error;
+@@ -394,16 +394,16 @@ int hibernation_snapshot(int platform_mode)
+  * resume_target_kernel - Restore system state from a hibernation image.
+  * @platform_mode: Whether or not to use the platform driver.
+  *
+- * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
+- * highmem that have not been restored yet from the image and run the low-level
+- * code that will restore the remaining contents of memory and switch to the
+- * just restored target kernel.
++ * Execute device drivers' "noirq" and "late" freeze callbacks, restore the
++ * contents of highmem that have not been restored yet from the image and run
++ * the low-level code that will restore the remaining contents of memory and
++ * switch to the just restored target kernel.
+  */
+ static int resume_target_kernel(bool platform_mode)
+ {
+       int error;
+-      error = dpm_suspend_noirq(PMSG_QUIESCE);
++      error = dpm_suspend_end(PMSG_QUIESCE);
+       if (error) {
+               printk(KERN_ERR "PM: Some devices failed to power down, "
+                       "aborting resume\n");
+@@ -460,7 +460,7 @@ static int resume_target_kernel(bool platform_mode)
+  Cleanup:
+       platform_restore_cleanup(platform_mode);
+-      dpm_resume_noirq(PMSG_RECOVER);
++      dpm_resume_start(PMSG_RECOVER);
+       return error;
+ }
+@@ -518,7 +518,7 @@ int hibernation_platform_enter(void)
+               goto Resume_devices;
+       }
+-      error = dpm_suspend_noirq(PMSG_HIBERNATE);
++      error = dpm_suspend_end(PMSG_HIBERNATE);
+       if (error)
+               goto Resume_devices;
+@@ -549,7 +549,7 @@ int hibernation_platform_enter(void)
+  Platform_finish:
+       hibernation_ops->finish();
+-      dpm_resume_noirq(PMSG_RESTORE);
++      dpm_resume_start(PMSG_RESTORE);
+  Resume_devices:
+       entering_platform_hibernation = false;
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 9824b41e..8c5014a 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -165,16 +165,20 @@ static int suspend_stats_show(struct seq_file *s, void *unused)
+       last_errno %= REC_FAILED_NUM;
+       last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
+       last_step %= REC_FAILED_NUM;
+-      seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
+-                      "%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
++      seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
++                      "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
+                       "success", suspend_stats.success,
+                       "fail", suspend_stats.fail,
+                       "failed_freeze", suspend_stats.failed_freeze,
+                       "failed_prepare", suspend_stats.failed_prepare,
+                       "failed_suspend", suspend_stats.failed_suspend,
++                      "failed_suspend_late",
++                              suspend_stats.failed_suspend_late,
+                       "failed_suspend_noirq",
+                               suspend_stats.failed_suspend_noirq,
+                       "failed_resume", suspend_stats.failed_resume,
++                      "failed_resume_early",
++                              suspend_stats.failed_resume_early,
+                       "failed_resume_noirq",
+                               suspend_stats.failed_resume_noirq);
+       seq_printf(s,   "failures:\n  last_failed_dev:\t%-s\n",
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 4fd51be..560a639 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -147,7 +147,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
+                       goto Platform_finish;
+       }
+-      error = dpm_suspend_noirq(PMSG_SUSPEND);
++      error = dpm_suspend_end(PMSG_SUSPEND);
+       if (error) {
+               printk(KERN_ERR "PM: Some devices failed to power down\n");
+               goto Platform_finish;
+@@ -189,7 +189,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
+       if (suspend_ops->wake)
+               suspend_ops->wake();
+-      dpm_resume_noirq(PMSG_RESUME);
++      dpm_resume_start(PMSG_RESUME);
+  Platform_finish:
+       if (suspend_ops->finish)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0176-PM-Sleep-Introduce-generic-callbacks-for-new-device-.patch b/patches.runtime_pm/0176-PM-Sleep-Introduce-generic-callbacks-for-new-device-.patch
new file mode 100644 (file)
index 0000000..0d967a2
--- /dev/null
@@ -0,0 +1,316 @@
+From 6b9458e73d77e875f3800ae15ef0500fde2e09e2 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 29 Jan 2012 20:38:41 +0100
+Subject: PM / Sleep: Introduce generic callbacks for new device PM phases
+
+Introduce generic subsystem callbacks for the new phases of device
+suspend/resume during system power transitions: "late suspend",
+"early resume", "late freeze", "early thaw", "late poweroff",
+"early restore".
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit e470d06655e00749f6f9372e4fa4f20cea7ed7c5)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/generic_ops.c |  157 +++++++++++++++++++++++++-------------
+ include/linux/pm.h               |    6 ++
+ 2 files changed, 110 insertions(+), 53 deletions(-)
+
+diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
+index 10bdd79..d03d290 100644
+--- a/drivers/base/power/generic_ops.c
++++ b/drivers/base/power/generic_ops.c
+@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
+ }
+ /**
+- * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
+- * @dev: Device to handle.
+- * @event: PM transition of the system under way.
+- * @bool: Whether or not this is the "noirq" stage.
+- *
+- * Execute the PM callback corresponding to @event provided by the driver of
+- * @dev, if defined, and return its error code.    Return 0 if the callback is
+- * not present.
++ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
++ * @dev: Device to suspend.
+  */
+-static int __pm_generic_call(struct device *dev, int event, bool noirq)
++int pm_generic_suspend_noirq(struct device *dev)
+ {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+-      int (*callback)(struct device *);
+-
+-      if (!pm)
+-              return 0;
+-
+-      switch (event) {
+-      case PM_EVENT_SUSPEND:
+-              callback = noirq ? pm->suspend_noirq : pm->suspend;
+-              break;
+-      case PM_EVENT_FREEZE:
+-              callback = noirq ? pm->freeze_noirq : pm->freeze;
+-              break;
+-      case PM_EVENT_HIBERNATE:
+-              callback = noirq ? pm->poweroff_noirq : pm->poweroff;
+-              break;
+-      case PM_EVENT_RESUME:
+-              callback = noirq ? pm->resume_noirq : pm->resume;
+-              break;
+-      case PM_EVENT_THAW:
+-              callback = noirq ? pm->thaw_noirq : pm->thaw;
+-              break;
+-      case PM_EVENT_RESTORE:
+-              callback = noirq ? pm->restore_noirq : pm->restore;
+-              break;
+-      default:
+-              callback = NULL;
+-              break;
+-      }
+-      return callback ? callback(dev) : 0;
++      return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+ }
++EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+ /**
+- * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
++ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
+  * @dev: Device to suspend.
+  */
+-int pm_generic_suspend_noirq(struct device *dev)
++int pm_generic_suspend_late(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+ }
+-EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
++EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
+ /**
+  * pm_generic_suspend - Generic suspend callback for subsystems.
+@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+  */
+ int pm_generic_suspend(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->suspend ? pm->suspend(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_suspend);
+@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
+  */
+ int pm_generic_freeze_noirq(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+ /**
++ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
++ * @dev: Device to freeze.
++ */
++int pm_generic_freeze_late(struct device *dev)
++{
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
++}
++EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
++
++/**
+  * pm_generic_freeze - Generic freeze callback for subsystems.
+  * @dev: Device to freeze.
+  */
+ int pm_generic_freeze(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->freeze ? pm->freeze(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_freeze);
+@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
+  */
+ int pm_generic_poweroff_noirq(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+ /**
++ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
++ * @dev: Device to handle.
++ */
++int pm_generic_poweroff_late(struct device *dev)
++{
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
++}
++EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
++
++/**
+  * pm_generic_poweroff - Generic poweroff callback for subsystems.
+  * @dev: Device to handle.
+  */
+ int pm_generic_poweroff(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+  */
+ int pm_generic_thaw_noirq(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_THAW, true);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+ /**
++ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
++ * @dev: Device to thaw.
++ */
++int pm_generic_thaw_early(struct device *dev)
++{
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
++}
++EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
++
++/**
+  * pm_generic_thaw - Generic thaw callback for subsystems.
+  * @dev: Device to thaw.
+  */
+ int pm_generic_thaw(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_THAW, false);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->thaw ? pm->thaw(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_thaw);
+@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
+  */
+ int pm_generic_resume_noirq(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_RESUME, true);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+ /**
++ * pm_generic_resume_early - Generic resume_early callback for subsystems.
++ * @dev: Device to resume.
++ */
++int pm_generic_resume_early(struct device *dev)
++{
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->resume_early ? pm->resume_early(dev) : 0;
++}
++EXPORT_SYMBOL_GPL(pm_generic_resume_early);
++
++/**
+  * pm_generic_resume - Generic resume callback for subsystems.
+  * @dev: Device to resume.
+  */
+ int pm_generic_resume(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_RESUME, false);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->resume ? pm->resume(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_resume);
+@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
+  */
+ int pm_generic_restore_noirq(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+ /**
++ * pm_generic_restore_early - Generic restore_early callback for subsystems.
++ * @dev: Device to resume.
++ */
++int pm_generic_restore_early(struct device *dev)
++{
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->restore_early ? pm->restore_early(dev) : 0;
++}
++EXPORT_SYMBOL_GPL(pm_generic_restore_early);
++
++/**
+  * pm_generic_restore - Generic restore callback for subsystems.
+  * @dev: Device to restore.
+  */
+ int pm_generic_restore(struct device *dev)
+ {
+-      return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
++      const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
++
++      return pm && pm->restore ? pm->restore(dev) : 0;
+ }
+ EXPORT_SYMBOL_GPL(pm_generic_restore);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index c68e1f2..73c6105 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -632,17 +632,23 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
+ extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
+ extern int pm_generic_prepare(struct device *dev);
++extern int pm_generic_suspend_late(struct device *dev);
+ extern int pm_generic_suspend_noirq(struct device *dev);
+ extern int pm_generic_suspend(struct device *dev);
++extern int pm_generic_resume_early(struct device *dev);
+ extern int pm_generic_resume_noirq(struct device *dev);
+ extern int pm_generic_resume(struct device *dev);
+ extern int pm_generic_freeze_noirq(struct device *dev);
++extern int pm_generic_freeze_late(struct device *dev);
+ extern int pm_generic_freeze(struct device *dev);
+ extern int pm_generic_thaw_noirq(struct device *dev);
++extern int pm_generic_thaw_early(struct device *dev);
+ extern int pm_generic_thaw(struct device *dev);
+ extern int pm_generic_restore_noirq(struct device *dev);
++extern int pm_generic_restore_early(struct device *dev);
+ extern int pm_generic_restore(struct device *dev);
+ extern int pm_generic_poweroff_noirq(struct device *dev);
++extern int pm_generic_poweroff_late(struct device *dev);
+ extern int pm_generic_poweroff(struct device *dev);
+ extern void pm_generic_complete(struct device *dev);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0177-PM-Domains-Run-late-early-device-suspend-callbacks-a.patch b/patches.runtime_pm/0177-PM-Domains-Run-late-early-device-suspend-callbacks-a.patch
new file mode 100644 (file)
index 0000000..4a88108
--- /dev/null
@@ -0,0 +1,352 @@
+From a6fd82b4d0598a3e72e5803f44de38ddfe6a0151 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sun, 29 Jan 2012 20:39:02 +0100
+Subject: PM / Domains: Run late/early device suspend callbacks at the right
+ time
+
+After the introduction of the late/early phases of device
+suspend/resume during system-wide power transitions it is possible
+to make the generic PM domains code execute its default late/early
+device suspend/resume callbacks during those phases instead of the
+corresponding _noirq phases.  The _noirq device suspend/resume
+phases were only used for executing those callbacks, because this
+was the only way it could be done, but now we can do better.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 0496c8ae366724a0a2136cec09a2e277e782c126)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |  157 ++++++++++++++++++++++++++++++-------------
+ 1 file changed, 111 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 939109b..d2c0323 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -820,17 +820,16 @@ static int pm_genpd_suspend(struct device *dev)
+ }
+ /**
+- * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
++ * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
+  * @dev: Device to suspend.
+  *
+  * Carry out a late suspend of a device under the assumption that its
+  * pm_domain field points to the domain member of an object of type
+  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+  */
+-static int pm_genpd_suspend_noirq(struct device *dev)
++static int pm_genpd_suspend_late(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
+-      int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -838,14 +837,28 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->suspend_power_off)
+-              return 0;
++      return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
++}
+-      ret = genpd_suspend_late(genpd, dev);
+-      if (ret)
+-              return ret;
++/**
++ * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
++ * @dev: Device to suspend.
++ *
++ * Stop the device and remove power from the domain if all devices in it have
++ * been stopped.
++ */
++static int pm_genpd_suspend_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
+-      if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
++      if (genpd->suspend_power_off
++          || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+               return 0;
+       genpd_stop_dev(genpd, dev);
+@@ -862,13 +875,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+ }
+ /**
+- * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
++ * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+  * @dev: Device to resume.
+  *
+- * Carry out an early resume of a device under the assumption that its
+- * pm_domain field points to the domain member of an object of type
+- * struct generic_pm_domain representing a power domain consisting of I/O
+- * devices.
++ * Restore power to the device's PM domain, if necessary, and start the device.
+  */
+ static int pm_genpd_resume_noirq(struct device *dev)
+ {
+@@ -890,13 +900,34 @@ static int pm_genpd_resume_noirq(struct device *dev)
+        */
+       pm_genpd_poweron(genpd);
+       genpd->suspended_count--;
+-      genpd_start_dev(genpd, dev);
+-      return genpd_resume_early(genpd, dev);
++      return genpd_start_dev(genpd, dev);
+ }
+ /**
+- * pm_genpd_resume - Resume a device belonging to an I/O power domain.
++ * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
++ * @dev: Device to resume.
++ *
++ * Carry out an early resume of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a power domain consisting of I/O
++ * devices.
++ */
++static int pm_genpd_resume_early(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
++}
++
++/**
++ * pm_genpd_resume - Resume of device in an I/O PM domain.
+  * @dev: Device to resume.
+  *
+  * Resume a device under the assumption that its pm_domain field points to the
+@@ -917,7 +948,7 @@ static int pm_genpd_resume(struct device *dev)
+ }
+ /**
+- * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
++ * pm_genpd_freeze - Freezing a device in an I/O PM domain.
+  * @dev: Device to freeze.
+  *
+  * Freeze a device under the assumption that its pm_domain field points to the
+@@ -938,7 +969,29 @@ static int pm_genpd_freeze(struct device *dev)
+ }
+ /**
+- * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
++ * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
++ * @dev: Device to freeze.
++ *
++ * Carry out a late freeze of a device under the assumption that its
++ * pm_domain field points to the domain member of an object of type
++ * struct generic_pm_domain representing a power domain consisting of I/O
++ * devices.
++ */
++static int pm_genpd_freeze_late(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
++
++      dev_dbg(dev, "%s()\n", __func__);
++
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
++}
++
++/**
++ * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+  * @dev: Device to freeze.
+  *
+  * Carry out a late freeze of a device under the assumption that its
+@@ -949,7 +1002,6 @@ static int pm_genpd_freeze(struct device *dev)
+ static int pm_genpd_freeze_noirq(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
+-      int ret;
+       dev_dbg(dev, "%s()\n", __func__);
+@@ -957,20 +1009,31 @@ static int pm_genpd_freeze_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->suspend_power_off)
+-              return 0;
++      return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
++}
+-      ret = genpd_freeze_late(genpd, dev);
+-      if (ret)
+-              return ret;
++/**
++ * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
++ * @dev: Device to thaw.
++ *
++ * Start the device, unless power has been removed from the domain already
++ * before the system transition.
++ */
++static int pm_genpd_thaw_noirq(struct device *dev)
++{
++      struct generic_pm_domain *genpd;
+-      genpd_stop_dev(genpd, dev);
++      dev_dbg(dev, "%s()\n", __func__);
+-      return 0;
++      genpd = dev_to_genpd(dev);
++      if (IS_ERR(genpd))
++              return -EINVAL;
++
++      return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
+ }
+ /**
+- * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
++ * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
+  * @dev: Device to thaw.
+  *
+  * Carry out an early thaw of a device under the assumption that its
+@@ -978,7 +1041,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
+  * struct generic_pm_domain representing a power domain consisting of I/O
+  * devices.
+  */
+-static int pm_genpd_thaw_noirq(struct device *dev)
++static int pm_genpd_thaw_early(struct device *dev)
+ {
+       struct generic_pm_domain *genpd;
+@@ -988,12 +1051,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->suspend_power_off)
+-              return 0;
+-
+-      genpd_start_dev(genpd, dev);
+-
+-      return genpd_thaw_early(genpd, dev);
++      return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
+ }
+ /**
+@@ -1018,13 +1076,11 @@ static int pm_genpd_thaw(struct device *dev)
+ }
+ /**
+- * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
++ * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+  * @dev: Device to resume.
+  *
+- * Carry out an early restore of a device under the assumption that its
+- * pm_domain field points to the domain member of an object of type
+- * struct generic_pm_domain representing a power domain consisting of I/O
+- * devices.
++ * Make sure the domain will be in the same power state as before the
++ * hibernation the system is resuming from and start the device if necessary.
+  */
+ static int pm_genpd_restore_noirq(struct device *dev)
+ {
+@@ -1054,9 +1110,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
+       pm_genpd_poweron(genpd);
+       genpd->suspended_count--;
+-      genpd_start_dev(genpd, dev);
+-      return genpd_resume_early(genpd, dev);
++      return genpd_start_dev(genpd, dev);
+ }
+ /**
+@@ -1099,11 +1154,15 @@ static void pm_genpd_complete(struct device *dev)
+ #define pm_genpd_prepare              NULL
+ #define pm_genpd_suspend              NULL
++#define pm_genpd_suspend_late         NULL
+ #define pm_genpd_suspend_noirq                NULL
++#define pm_genpd_resume_early         NULL
+ #define pm_genpd_resume_noirq         NULL
+ #define pm_genpd_resume                       NULL
+ #define pm_genpd_freeze                       NULL
++#define pm_genpd_freeze_late          NULL
+ #define pm_genpd_freeze_noirq         NULL
++#define pm_genpd_thaw_early           NULL
+ #define pm_genpd_thaw_noirq           NULL
+ #define pm_genpd_thaw                 NULL
+ #define pm_genpd_restore_noirq                NULL
+@@ -1482,7 +1541,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
+ {
+       int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+-      return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
++      return cb ? cb(dev) : pm_generic_suspend_late(dev);
+ }
+ /**
+@@ -1493,7 +1552,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
+ {
+       int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+-      return cb ? cb(dev) : pm_generic_resume_noirq(dev);
++      return cb ? cb(dev) : pm_generic_resume_early(dev);
+ }
+ /**
+@@ -1526,7 +1585,7 @@ static int pm_genpd_default_freeze_late(struct device *dev)
+ {
+       int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+-      return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
++      return cb ? cb(dev) : pm_generic_freeze_late(dev);
+ }
+ /**
+@@ -1537,7 +1596,7 @@ static int pm_genpd_default_thaw_early(struct device *dev)
+ {
+       int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+-      return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
++      return cb ? cb(dev) : pm_generic_thaw_early(dev);
+ }
+ /**
+@@ -1596,16 +1655,22 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+       genpd->domain.ops.prepare = pm_genpd_prepare;
+       genpd->domain.ops.suspend = pm_genpd_suspend;
++      genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
+       genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+       genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
++      genpd->domain.ops.resume_early = pm_genpd_resume_early;
+       genpd->domain.ops.resume = pm_genpd_resume;
+       genpd->domain.ops.freeze = pm_genpd_freeze;
++      genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
+       genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+       genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
++      genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
+       genpd->domain.ops.thaw = pm_genpd_thaw;
+       genpd->domain.ops.poweroff = pm_genpd_suspend;
++      genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
+       genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
+       genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
++      genpd->domain.ops.restore_early = pm_genpd_resume_early;
+       genpd->domain.ops.restore = pm_genpd_resume;
+       genpd->domain.ops.complete = pm_genpd_complete;
+       genpd->dev_ops.save_state = pm_genpd_default_save_state;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0178-PM-QoS-Simplify-PM-QoS-expansion-merge.patch b/patches.runtime_pm/0178-PM-QoS-Simplify-PM-QoS-expansion-merge.patch
new file mode 100644 (file)
index 0000000..6e6269f
--- /dev/null
@@ -0,0 +1,89 @@
+From 13e5f915ea6eb2bb64310f9b922ecc0b54f70267 Mon Sep 17 00:00:00 2001
+From: Alex Frid <afrid@nvidia.com>
+Date: Sun, 29 Jan 2012 20:39:25 +0100
+Subject: PM / QoS: Simplify PM QoS expansion/merge
+
+ - Replace class ID #define with enumeration
+ - Loop through PM QoS objects during initialization (rather than
+   initializing them one-by-one)
+
+Signed-off-by: Alex Frid <afrid@nvidia.com>
+Reviewed-by: Antti Miettinen <amiettinen@nvidia.com>
+Reviewed-by: Diwakar Tundlam <dtundlam@nvidia.com>
+Reviewed-by: Scott Williams <scwilliams@nvidia.com>
+Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
+Acked-by: markgross <markgross@thegnar.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit d031e1de2c5ba91e67ed83f6adf624543ab2b03d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_qos.h |   14 +++++++++-----
+ kernel/power/qos.c     |   23 ++++++++++-------------
+ 2 files changed, 19 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index e5bbcba..5ac91d8 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -9,12 +9,16 @@
+ #include <linux/miscdevice.h>
+ #include <linux/device.h>
+-#define PM_QOS_RESERVED 0
+-#define PM_QOS_CPU_DMA_LATENCY 1
+-#define PM_QOS_NETWORK_LATENCY 2
+-#define PM_QOS_NETWORK_THROUGHPUT 3
++enum {
++      PM_QOS_RESERVED = 0,
++      PM_QOS_CPU_DMA_LATENCY,
++      PM_QOS_NETWORK_LATENCY,
++      PM_QOS_NETWORK_THROUGHPUT,
++
++      /* insert new class ID */
++      PM_QOS_NUM_CLASSES,
++};
+-#define PM_QOS_NUM_CLASSES 4
+ #define PM_QOS_DEFAULT_VALUE -1
+ #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE      (2000 * USEC_PER_SEC)
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 995e3bd..d6d6dbd 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -469,21 +469,18 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ static int __init pm_qos_power_init(void)
+ {
+       int ret = 0;
++      int i;
+-      ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+-      if (ret < 0) {
+-              printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
+-              return ret;
+-      }
+-      ret = register_pm_qos_misc(&network_lat_pm_qos);
+-      if (ret < 0) {
+-              printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
+-              return ret;
++      BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
++
++      for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
++              ret = register_pm_qos_misc(pm_qos_array[i]);
++              if (ret < 0) {
++                      printk(KERN_ERR "pm_qos_param: %s setup failed\n",
++                             pm_qos_array[i]->name);
++                      return ret;
++              }
+       }
+-      ret = register_pm_qos_misc(&network_throughput_pm_qos);
+-      if (ret < 0)
+-              printk(KERN_ERR
+-                      "pm_qos_param: network_throughput setup failed\n");
+       return ret;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0179-PM-Hibernate-Thaw-kernel-threads-in-SNAPSHOT_CREATE_.patch b/patches.runtime_pm/0179-PM-Hibernate-Thaw-kernel-threads-in-SNAPSHOT_CREATE_.patch
new file mode 100644 (file)
index 0000000..e38db21
--- /dev/null
@@ -0,0 +1,49 @@
+From 0d3b27d7ec5e0e8c4ec4f18e0543bd9a307519ee Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Wed, 1 Feb 2012 22:16:36 +0100
+Subject: PM / Hibernate: Thaw kernel threads in SNAPSHOT_CREATE_IMAGE ioctl
+ path
+
+In the SNAPSHOT_CREATE_IMAGE ioctl, if the call to hibernation_snapshot()
+fails, the frozen tasks are not thawed.
+
+And in the case of success, if we happen to exit due to a successful freezer
+test, all tasks (including those of userspace) are thawed, whereas actually
+we should have thawed only the kernel threads at that point. Fix both these
+issues.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Cc: stable@vger.kernel.org
+(cherry picked from commit fe9161db2e6053da21e4649d77bbefaf3030b11d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/user.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index e5a21a8..3e10007 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -249,13 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               }
+               pm_restore_gfp_mask();
+               error = hibernation_snapshot(data->platform_support);
+-              if (!error) {
++              if (error) {
++                      thaw_kernel_threads();
++              } else {
+                       error = put_user(in_suspend, (int __user *)arg);
+                       if (!error && !freezer_test_done)
+                               data->ready = 1;
+                       if (freezer_test_done) {
+                               freezer_test_done = false;
+-                              thaw_processes();
++                              thaw_kernel_threads();
+                       }
+               }
+               break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0180-PM-Freezer-Thaw-only-kernel-threads-if-freezing-of-k.patch b/patches.runtime_pm/0180-PM-Freezer-Thaw-only-kernel-threads-if-freezing-of-k.patch
new file mode 100644 (file)
index 0000000..02c7394
--- /dev/null
@@ -0,0 +1,110 @@
+From 6d62dfd237637ac51b285e869bcd3c03326681fd Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Fri, 3 Feb 2012 22:22:41 +0100
+Subject: PM / Freezer: Thaw only kernel threads if freezing of kernel threads
+ fails
+
+If freezing of kernel threads fails, we are expected to automatically
+thaw tasks in the error recovery path. However, at times, we encounter
+situations in which we would like the automatic error recovery path
+to thaw only the kernel threads, because we want to be able to do
+some more cleanup before we thaw userspace. Something like:
+
+error = freeze_kernel_threads();
+if (error) {
+       /* Do some cleanup */
+
+       /* Only then thaw userspace tasks*/
+       thaw_processes();
+}
+
+An example of such a situation is where we freeze/thaw filesystems
+during suspend/hibernation. There, if freezing of kernel threads
+fails, we would like to thaw the frozen filesystems before thawing
+the userspace tasks.
+
+So, modify freeze_kernel_threads() to thaw only kernel threads in
+case of freezing failure. And change suspend_freeze_processes()
+accordingly. (At the same time, let us also get rid of the rather
+cryptic usage of the conditional operator (:?) in that function.)
+
+[rjw: In fact, this patch fixes a regression introduced during the
+ 3.3 merge window, because without it thaw_processes() may be called
+ before swsusp_free() in some situations and that may lead to massive
+ memory allocation failures.]
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Acked-by: Nigel Cunningham <nigel@tuxonice.net>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 379e0be812ab8a2a351e784b0c987788f5123090)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/power.h   |   24 ++++++++++++++++++++++--
+ kernel/power/process.c |    7 +++++--
+ 2 files changed, 27 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 0c4defe..21724ee 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -231,8 +231,28 @@ extern int pm_test_level;
+ #ifdef CONFIG_SUSPEND_FREEZER
+ static inline int suspend_freeze_processes(void)
+ {
+-      int error = freeze_processes();
+-      return error ? : freeze_kernel_threads();
++      int error;
++
++      error = freeze_processes();
++
++      /*
++       * freeze_processes() automatically thaws every task if freezing
++       * fails. So we need not do anything extra upon error.
++       */
++      if (error)
++              goto Finish;
++
++      error = freeze_kernel_threads();
++
++      /*
++       * freeze_kernel_threads() thaws only kernel threads upon freezing
++       * failure. So we have to thaw the userspace tasks ourselves.
++       */
++      if (error)
++              thaw_processes();
++
++ Finish:
++      return error;
+ }
+ static inline void suspend_thaw_processes(void)
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index eeca003..7e42645 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -143,7 +143,10 @@ int freeze_processes(void)
+ /**
+  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+  *
+- * On success, returns 0.  On failure, -errno and system is fully thawed.
++ * On success, returns 0.  On failure, -errno and only the kernel threads are
++ * thawed, so as to give a chance to the caller to do additional cleanups
++ * (if any) before thawing the userspace tasks. So, it is the responsibility
++ * of the caller to thaw the userspace tasks, when the time is right.
+  */
+ int freeze_kernel_threads(void)
+ {
+@@ -159,7 +162,7 @@ int freeze_kernel_threads(void)
+       BUG_ON(in_atomic());
+       if (error)
+-              thaw_processes();
++              thaw_kernel_threads();
+       return error;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0181-PM-QoS-CPU-C-state-breakage-with-PM-Qos-change.patch b/patches.runtime_pm/0181-PM-QoS-CPU-C-state-breakage-with-PM-Qos-change.patch
new file mode 100644 (file)
index 0000000..b217d0b
--- /dev/null
@@ -0,0 +1,58 @@
+From 2e9454c75fb01bd194f45e92f5211fa276b733cd Mon Sep 17 00:00:00 2001
+From: Venkatesh Pallipadi <venki@google.com>
+Date: Fri, 3 Feb 2012 22:22:25 +0100
+Subject: PM / QoS: CPU C-state breakage with PM Qos change
+
+Looks like change "PM QoS: Move and rename the implementation files"
+merged during the 3.2 development cycle made PM QoS depend on
+CONFIG_PM which depends on (PM_SLEEP || PM_RUNTIME).
+
+That breaks CPU C-states with kernels not having these CONFIGs, causing CPUs
+to spend time in Polling loop idle instead of going into deep C-states,
+consuming way way more power. This is with either acpi idle or intel idle
+enabled.
+
+Either CONFIG_PM should be enabled with any pm_qos users or
+the !CONFIG_PM pm_qos_request() should return sane defaults not to break
+the existing users. Here's is the patch for the latter option.
+
+[rjw: Modified the changelog slightly.]
+
+Signed-off-by: Venkatesh Pallipadi <venki@google.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Cc: stable@vger.kernel.org
+(cherry picked from commit d020283dc694c9ec31b410f522252f7a8397e67d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_qos.h |   14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 5ac91d8..67c5217 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -114,7 +114,19 @@ static inline void pm_qos_remove_request(struct pm_qos_request *req)
+                       { return; }
+ static inline int pm_qos_request(int pm_qos_class)
+-                      { return 0; }
++{
++      switch (pm_qos_class) {
++      case PM_QOS_CPU_DMA_LATENCY:
++              return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
++      case PM_QOS_NETWORK_LATENCY:
++              return PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
++      case PM_QOS_NETWORK_THROUGHPUT:
++              return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
++      default:
++              return PM_QOS_DEFAULT_VALUE;
++      }
++}
++
+ static inline int pm_qos_add_notifier(int pm_qos_class,
+                                     struct notifier_block *notifier)
+                       { return 0; }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0182-PM-Suspend-Avoid-code-duplication-in-suspend-statist.patch b/patches.runtime_pm/0182-PM-Suspend-Avoid-code-duplication-in-suspend-statist.patch
new file mode 100644 (file)
index 0000000..021adb7
--- /dev/null
@@ -0,0 +1,93 @@
+From 8c3cb27999de30afd71511f033f7ac9f9899ed1f Mon Sep 17 00:00:00 2001
+From: Marcos Paulo de Souza <marcos.mage@gmail.com>
+Date: Sat, 4 Feb 2012 22:26:13 +0100
+Subject: PM / Suspend: Avoid code duplication in suspend statistics update
+
+The code
+       if (error) {
+               suspend_stats.fail++;
+               dpm_save_failed_errno(error);
+       } else
+               suspend_stats.success++;
+
+Appears in the kernel/power/main.c and kernel/power/suspend.c.
+
+This patch just creates a new function to avoid duplicated code.
+
+Suggested-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Marcos Paulo de Souza <marcos.mage@gmail.com>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 8916e3702ec422b57cc549fbae3986106292100f)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/suspend.h |   16 ++++++++++++++++
+ kernel/power/main.c     |    6 +-----
+ kernel/power/suspend.c  |    6 +-----
+ 3 files changed, 18 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index 43f0421..ec59fb0 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -94,6 +94,22 @@ static inline void dpm_save_failed_step(enum suspend_stat_step step)
+ }
+ /**
++ * suspend_stats_update - Update success/failure statistics of suspend-to-ram
++ *
++ * @error: Value returned by enter_state() function
++ */
++static inline void suspend_stats_update(int error)
++{
++      if (error) {
++              suspend_stats.fail++;
++              dpm_save_failed_errno(error);
++      } else {
++              suspend_stats.success++;
++      }
++}
++
++
++/**
+  * struct platform_suspend_ops - Callbacks for managing platform dependent
+  *    system sleep states.
+  *
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 8c5014a..b1e3248 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -296,11 +296,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+       }
+       if (state < PM_SUSPEND_MAX && *s) {
+               error = enter_state(state);
+-              if (error) {
+-                      suspend_stats.fail++;
+-                      dpm_save_failed_errno(error);
+-              } else
+-                      suspend_stats.success++;
++              suspend_stats_update(error);
+       }
+ #endif
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 560a639..03bc92b 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -321,11 +321,7 @@ int pm_suspend(suspend_state_t state)
+       int ret;
+       if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) {
+               ret = enter_state(state);
+-              if (ret) {
+-                      suspend_stats.fail++;
+-                      dpm_save_failed_errno(ret);
+-              } else
+-                      suspend_stats.success++;
++              suspend_stats_update(ret);
+               return ret;
+       }
+       return -EINVAL;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0183-PM-Freezer-Docs-Document-the-beauty-of-freeze-thaw-s.patch b/patches.runtime_pm/0183-PM-Freezer-Docs-Document-the-beauty-of-freeze-thaw-s.patch
new file mode 100644 (file)
index 0000000..fa39bad
--- /dev/null
@@ -0,0 +1,60 @@
+From d1b7eced0184f8fb0776c37885e5897eb072173d Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Sat, 4 Feb 2012 22:26:26 +0100
+Subject: PM / Freezer / Docs: Document the beauty of freeze/thaw semantics
+
+The way the different freeze/thaw functions encapsulate each other are quite
+lovely from a design point of view. And as a side-effect, the way in which
+they are invoked (cleaning up on failure for example) differs significantly
+from how usual functions are dealt with. This is because of the underlying
+semantics that govern the freezing and thawing of various tasks.
+
+This subtle aspect that differentiates these functions from the rest, is
+worth documenting.
+
+Many thanks to Tejun Heo for providing enlightenment on this topic.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 9045a05044268b075c13bb0284601b24959dc3c6)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/power/freezing-of-tasks.txt |   21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
+index ebd7490..ec715cd 100644
+--- a/Documentation/power/freezing-of-tasks.txt
++++ b/Documentation/power/freezing-of-tasks.txt
+@@ -63,6 +63,27 @@ devices have been reinitialized, the function thaw_processes() is called in
+ order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
+ have been frozen leave __refrigerator() and continue running.
++
++Rationale behind the functions dealing with freezing and thawing of tasks:
++-------------------------------------------------------------------------
++
++freeze_processes():
++  - freezes only userspace tasks
++
++freeze_kernel_threads():
++  - freezes all tasks (including kernel threads) because we can't freeze
++    kernel threads without freezing userspace tasks
++
++thaw_kernel_threads():
++  - thaws only kernel threads; this is particularly useful if we need to do
++    anything special in between thawing of kernel threads and thawing of
++    userspace tasks, or if we want to postpone the thawing of userspace tasks
++
++thaw_processes():
++  - thaws all tasks (including kernel threads) because we can't thaw userspace
++    tasks without thawing kernel threads
++
++
+ III. Which kernel threads are freezable?
+ Kernel threads are not freezable by default.  However, a kernel thread may clear
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0184-PM-Hibernate-Thaw-kernel-threads-in-hibernation_snap.patch b/patches.runtime_pm/0184-PM-Hibernate-Thaw-kernel-threads-in-hibernation_snap.patch
new file mode 100644 (file)
index 0000000..a59bbea
--- /dev/null
@@ -0,0 +1,77 @@
+From 77e33b76bdc5b44c3ae937e3d309ea66af7f59ac Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Sat, 4 Feb 2012 22:26:38 +0100
+Subject: PM / Hibernate: Thaw kernel threads in hibernation_snapshot() in
+ error/test path
+
+In the hibernation call path, the kernel threads are frozen inside
+hibernation_snapshot(). If we happen to encounter an error further down
+the road or if we are exiting early due to a successful freezer test,
+then thaw kernel threads before returning to the caller.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 51d6ff7acd920379f54d0be4dbe844a46178a65f)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |    6 ++++--
+ kernel/power/user.c      |    8 ++------
+ 2 files changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index f3b03d4..23c1fe3 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -343,13 +343,13 @@ int hibernation_snapshot(int platform_mode)
+                * successful freezer test.
+                */
+               freezer_test_done = true;
+-              goto Cleanup;
++              goto Thaw;
+       }
+       error = dpm_prepare(PMSG_FREEZE);
+       if (error) {
+               dpm_complete(PMSG_RECOVER);
+-              goto Cleanup;
++              goto Thaw;
+       }
+       suspend_console();
+@@ -385,6 +385,8 @@ int hibernation_snapshot(int platform_mode)
+       platform_end(platform_mode);
+       return error;
++ Thaw:
++      thaw_kernel_threads();
+  Cleanup:
+       swsusp_free();
+       goto Close;
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 3e10007..7bee91f 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -249,16 +249,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               }
+               pm_restore_gfp_mask();
+               error = hibernation_snapshot(data->platform_support);
+-              if (error) {
+-                      thaw_kernel_threads();
+-              } else {
++              if (!error) {
+                       error = put_user(in_suspend, (int __user *)arg);
+                       if (!error && !freezer_test_done)
+                               data->ready = 1;
+-                      if (freezer_test_done) {
++                      if (freezer_test_done)
+                               freezer_test_done = false;
+-                              thaw_kernel_threads();
+-                      }
+               }
+               break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0185-PM-Hibernate-Refactor-and-simplify-freezer_test_done.patch b/patches.runtime_pm/0185-PM-Hibernate-Refactor-and-simplify-freezer_test_done.patch
new file mode 100644 (file)
index 0000000..2546f8c
--- /dev/null
@@ -0,0 +1,71 @@
+From 1200a11c7f5d2b2e984bd84ccec59bad00bb5b5c Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Sat, 4 Feb 2012 23:39:56 +0100
+Subject: PM / Hibernate: Refactor and simplify freezer_test_done
+
+The code related to 'freezer_test_done' is needlessly convoluted.
+Refactor the code and simplify the implementation.
+
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit a556d5b58345ccf51826b9ceac078072f830738b)
+
+Conflicts:
+
+       kernel/power/hibernate.c
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   10 +++++-----
+ kernel/power/user.c      |    6 ++----
+ 2 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 23c1fe3..0a186cf 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -629,12 +629,8 @@ int hibernate(void)
+               goto Free_bitmaps;
+       error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
+-      if (error)
+-              goto Thaw;
+-      if (freezer_test_done) {
+-              freezer_test_done = false;
++      if (error || freezer_test_done)
+               goto Thaw;
+-      }
+       if (in_suspend) {
+               unsigned int flags = 0;
+@@ -659,6 +655,10 @@ int hibernate(void)
+  Thaw:
+       thaw_processes();
++
++      /* Don't bother checking whether freezer_test_done is true */
++      freezer_test_done = false;
++
+  Free_bitmaps:
+       free_basic_memory_bitmaps();
+  Enable_umh:
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 7bee91f..33c4329 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -251,10 +251,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               error = hibernation_snapshot(data->platform_support);
+               if (!error) {
+                       error = put_user(in_suspend, (int __user *)arg);
+-                      if (!error && !freezer_test_done)
+-                              data->ready = 1;
+-                      if (freezer_test_done)
+-                              freezer_test_done = false;
++                      data->ready = !freezer_test_done && !error;
++                      freezer_test_done = false;
+               }
+               break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0186-PM-Domains-Provide-a-dummy-dev_gpd_data-when-generic.patch b/patches.runtime_pm/0186-PM-Domains-Provide-a-dummy-dev_gpd_data-when-generic.patch
new file mode 100644 (file)
index 0000000..c35374c
--- /dev/null
@@ -0,0 +1,52 @@
+From 9d12ae0814fa170619c2cfe00194e8801e3b16c9 Mon Sep 17 00:00:00 2001
+From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Date: Sat, 4 Feb 2012 22:26:49 +0100
+Subject: PM / Domains: Provide a dummy dev_gpd_data() when generic domains
+ are not used
+
+dev_gpd_data() is a generic macro, also useful for drivers. Hence it should
+be available also when CONFIG_PM_GENERIC_DOMAINS is not selected. OTOH,
+to_gpd_data() is so far unused outside of the generic PM domain code and
+does not seem to be very useful without CONFIG_PM_GENERIC_DOMAINS.
+
+Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 9b4f617b1c2004332113b4a2c89dfb6e8029c987)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_domain.h |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index e3ff875..e76cc9a 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -101,12 +101,12 @@ struct generic_pm_domain_data {
+       bool need_restore;
+ };
++#ifdef CONFIG_PM_GENERIC_DOMAINS
+ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+ {
+       return container_of(pdd, struct generic_pm_domain_data, base);
+ }
+-#ifdef CONFIG_PM_GENERIC_DOMAINS
+ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+ {
+       return to_gpd_data(dev->power.subsys_data->domain_data);
+@@ -207,6 +207,10 @@ static inline bool default_stop_ok(struct device *dev)
+       return false;
+ }
+ #define pm_domain_always_on_gov NULL
++static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
++{
++      return NULL;
++}
+ #endif
+ static inline int pm_genpd_remove_callbacks(struct device *dev)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0187-PM-Make-sysrq-o-be-available-for-CONFIG_PM-unset.patch b/patches.runtime_pm/0187-PM-Make-sysrq-o-be-available-for-CONFIG_PM-unset.patch
new file mode 100644 (file)
index 0000000..32dc187
--- /dev/null
@@ -0,0 +1,53 @@
+From 7089d03dd9df82aa9f6f08743eeadad0aa682b53 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 14 Jan 2012 00:33:03 +0100
+Subject: PM: Make sysrq-o be available for CONFIG_PM unset
+
+After commit 1eb208aea3179dd2fc0cdeea45ef869d75b4fe70, "PM: Make
+CONFIG_PM depend on (CONFIG_PM_SLEEP || CONFIG_PM_RUNTIME)", the
+files under kernel/power are not built unless CONFIG_PM_SLEEP or
+CONFIG_PM_RUNTIME is set.  In particular, this causes
+kernel/power/poweroff.c to be omitted, even though it should be
+compiled, because CONFIG_MAGIC_SYSRQ is set.
+
+Fix the problem by causing kernel/power/Makefile to be processed
+for CONFIG_PM unset too.
+
+Reported-and-tested-by: Phil Oester <kernel@linuxace.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit dae5cbc2440b1d21a15715d0f1fb20f632dd38ee)
+
+Conflicts:
+
+       kernel/Makefile
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/Makefile |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/Makefile b/kernel/Makefile
+index de5198f..856f9d6 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -24,6 +24,8 @@ CFLAGS_REMOVE_sched_clock.o = -pg
+ CFLAGS_REMOVE_irq_work.o = -pg
+ endif
++obj-y += power/
++
+ obj-$(CONFIG_FREEZER) += freezer.o
+ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
+@@ -52,8 +54,6 @@ obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+ obj-$(CONFIG_UID16) += uid16.o
+ obj-$(CONFIG_MODULES) += module.o
+ obj-$(CONFIG_KALLSYMS) += kallsyms.o
+-obj-$(CONFIG_PM) += power/
+-obj-$(CONFIG_FREEZER) += power/
+ obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
+ obj-$(CONFIG_KEXEC) += kexec.o
+ obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0188-PM-QoS-unconditionally-build-the-feature.patch b/patches.runtime_pm/0188-PM-QoS-unconditionally-build-the-feature.patch
new file mode 100644 (file)
index 0000000..995ee11
--- /dev/null
@@ -0,0 +1,116 @@
+From 2af3898d131f8372e89abca681a971abffa9b51a Mon Sep 17 00:00:00 2001
+From: Jean Pihet <jean.pihet@newoldbits.com>
+Date: Mon, 13 Feb 2012 16:23:42 +0100
+Subject: PM / QoS: unconditionally build the feature
+
+The PM QoS feature originally didn't depend on CONFIG_PM, which was
+mistakenly changed by commit e8db0be1245de16a6cc6365506abc392c3c212d4
+
+    PM QoS: Move and rename the implementation files
+
+Later, commit d020283dc694c9ec31b410f522252f7a8397e67d
+
+    PM / QoS: CPU C-state breakage with PM Qos change
+
+partially fixed that by introducing a static inline definition of
+pm_qos_request(), but that still didn't allow user space to use
+the PM QoS interface if CONFIG_PM was unset (which had been possible
+before).  For this reason, remove the dependency of PM QoS on
+CONFIG_PM to make it work (as intended) with CONFIG_PM unset.
+
+[rjw: Replaced the original changelog with a new one.]
+
+Signed-off-by: Jean Pihet <j-pihet@ti.com>
+Reported-by: Venkatesh Pallipadi <venki@google.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit a9b542ee607a8afafa9447292394959fc84ea650)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_qos.h |   41 +----------------------------------------
+ kernel/power/Makefile  |    3 ++-
+ 2 files changed, 3 insertions(+), 41 deletions(-)
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 67c5217..c8a541e 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -67,7 +67,6 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
+       return req->dev != 0;
+ }
+-#ifdef CONFIG_PM
+ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+                        enum pm_qos_req_action action, int value);
+ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+@@ -82,6 +81,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request *req);
+ s32 pm_qos_read_value(struct pm_qos_constraints *c);
++#ifdef CONFIG_PM
+ s32 __dev_pm_qos_read_value(struct device *dev);
+ s32 dev_pm_qos_read_value(struct device *dev);
+ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+@@ -99,45 +99,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev);
+ int dev_pm_qos_add_ancestor_request(struct device *dev,
+                                   struct dev_pm_qos_request *req, s32 value);
+ #else
+-static inline int pm_qos_update_target(struct pm_qos_constraints *c,
+-                                     struct plist_node *node,
+-                                     enum pm_qos_req_action action,
+-                                     int value)
+-                      { return 0; }
+-static inline void pm_qos_add_request(struct pm_qos_request *req,
+-                                    int pm_qos_class, s32 value)
+-                      { return; }
+-static inline void pm_qos_update_request(struct pm_qos_request *req,
+-                                       s32 new_value)
+-                      { return; }
+-static inline void pm_qos_remove_request(struct pm_qos_request *req)
+-                      { return; }
+-
+-static inline int pm_qos_request(int pm_qos_class)
+-{
+-      switch (pm_qos_class) {
+-      case PM_QOS_CPU_DMA_LATENCY:
+-              return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+-      case PM_QOS_NETWORK_LATENCY:
+-              return PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
+-      case PM_QOS_NETWORK_THROUGHPUT:
+-              return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
+-      default:
+-              return PM_QOS_DEFAULT_VALUE;
+-      }
+-}
+-
+-static inline int pm_qos_add_notifier(int pm_qos_class,
+-                                    struct notifier_block *notifier)
+-                      { return 0; }
+-static inline int pm_qos_remove_notifier(int pm_qos_class,
+-                                       struct notifier_block *notifier)
+-                      { return 0; }
+-static inline int pm_qos_request_active(struct pm_qos_request *req)
+-                      { return 0; }
+-static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+-                      { return 0; }
+-
+ static inline s32 __dev_pm_qos_read_value(struct device *dev)
+                       { return 0; }
+ static inline s32 dev_pm_qos_read_value(struct device *dev)
+diff --git a/kernel/power/Makefile b/kernel/power/Makefile
+index 07e0e28..66d808e 100644
+--- a/kernel/power/Makefile
++++ b/kernel/power/Makefile
+@@ -1,7 +1,8 @@
+ ccflags-$(CONFIG_PM_DEBUG)    := -DDEBUG
+-obj-$(CONFIG_PM)              += main.o qos.o
++obj-y                         += qos.o
++obj-$(CONFIG_PM)              += main.o
+ obj-$(CONFIG_VT_CONSOLE_SLEEP)        += console.o
+ obj-$(CONFIG_FREEZER)         += process.o
+ obj-$(CONFIG_SUSPEND)         += suspend.o
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0189-PM-Sleep-Initialize-wakeup-source-locks-in-wakeup_so.patch b/patches.runtime_pm/0189-PM-Sleep-Initialize-wakeup-source-locks-in-wakeup_so.patch
new file mode 100644 (file)
index 0000000..4cc7442
--- /dev/null
@@ -0,0 +1,41 @@
+From 786f7c4222b9aaf620afad10c7474d9d60b2d2e5 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 11 Feb 2012 00:00:11 +0100
+Subject: PM / Sleep: Initialize wakeup source locks in wakeup_source_add()
+
+Initialize wakeup source locks in wakeup_source_add() instead of
+wakeup_source_create(), because otherwise the locks of the wakeup
+sources that haven't been allocated with wakeup_source_create()
+aren't initialized and handled properly.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 7c95149b7f1f61201b12c73c4862a41bf2428961)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/wakeup.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index caf995f..6e591a8 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -64,7 +64,6 @@ struct wakeup_source *wakeup_source_create(const char *name)
+       if (!ws)
+               return NULL;
+-      spin_lock_init(&ws->lock);
+       if (name)
+               ws->name = kstrdup(name, GFP_KERNEL);
+@@ -105,6 +104,7 @@ void wakeup_source_add(struct wakeup_source *ws)
+       if (WARN_ON(!ws))
+               return;
++      spin_lock_init(&ws->lock);
+       setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+       ws->active = false;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0190-PM-Sleep-Do-not-check-wakeup-too-often-in-try_to_fre.patch b/patches.runtime_pm/0190-PM-Sleep-Do-not-check-wakeup-too-often-in-try_to_fre.patch
new file mode 100644 (file)
index 0000000..ab39b14
--- /dev/null
@@ -0,0 +1,48 @@
+From e3a42333a2ec0816f4d42a72281291919a7c9da8 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 11 Feb 2012 00:00:34 +0100
+Subject: PM / Sleep: Do not check wakeup too often in try_to_freeze_tasks()
+
+Use the observation that it is more efficient to check the wakeup
+variable once before the loop reporting tasks that were not
+frozen in try_to_freeze_tasks() than to do that in every step of that
+loop.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 6c83b4818dd65eb17e633b6b629a81da7bed90b3)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/process.c |   16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 7e42645..6aeb5ef 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -98,13 +98,15 @@ static int try_to_freeze_tasks(bool user_only)
+                      elapsed_csecs / 100, elapsed_csecs % 100,
+                      todo - wq_busy, wq_busy);
+-              read_lock(&tasklist_lock);
+-              do_each_thread(g, p) {
+-                      if (!wakeup && !freezer_should_skip(p) &&
+-                          p != current && freezing(p) && !frozen(p))
+-                              sched_show_task(p);
+-              } while_each_thread(g, p);
+-              read_unlock(&tasklist_lock);
++              if (!wakeup) {
++                      read_lock(&tasklist_lock);
++                      do_each_thread(g, p) {
++                              if (p != current && !freezer_should_skip(p)
++                                  && freezing(p) && !frozen(p))
++                                      sched_show_task(p);
++                      } while_each_thread(g, p);
++                      read_unlock(&tasklist_lock);
++              }
+       } else {
+               printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
+                       elapsed_csecs % 100);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0191-PM-Sleep-Remove-unnecessary-label-from-suspend_freez.patch b/patches.runtime_pm/0191-PM-Sleep-Remove-unnecessary-label-from-suspend_freez.patch
new file mode 100644 (file)
index 0000000..9d316f9
--- /dev/null
@@ -0,0 +1,51 @@
+From 2924f4c473f6328ddc56bf54b6b5779d65d3b9e8 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Sat, 11 Feb 2012 22:40:23 +0100
+Subject: PM / Sleep: Remove unnecessary label from suspend_freeze_processes()
+
+The Finish label in suspend_freeze_processes() is in fact unnecessary
+and makes the function look more complicated than it really is, so
+remove that label (along with a few empty lines).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit 6f585f750d792652f33b6e85b1ee205be4b5e572)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/power.h |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 21724ee..398d42b 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -234,16 +234,14 @@ static inline int suspend_freeze_processes(void)
+       int error;
+       error = freeze_processes();
+-
+       /*
+        * freeze_processes() automatically thaws every task if freezing
+        * fails. So we need not do anything extra upon error.
+        */
+       if (error)
+-              goto Finish;
++              return error;
+       error = freeze_kernel_threads();
+-
+       /*
+        * freeze_kernel_threads() thaws only kernel threads upon freezing
+        * failure. So we have to thaw the userspace tasks ourselves.
+@@ -251,7 +249,6 @@ static inline int suspend_freeze_processes(void)
+       if (error)
+               thaw_processes();
+- Finish:
+       return error;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0192-PM-Sleep-Unify-kerneldoc-comments-in-kernel-power-su.patch b/patches.runtime_pm/0192-PM-Sleep-Unify-kerneldoc-comments-in-kernel-power-su.patch
new file mode 100644 (file)
index 0000000..11fb234
--- /dev/null
@@ -0,0 +1,142 @@
+From a6f7306c0933fa6c5e5e707d5a87bf53fbefeef1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 13 Feb 2012 16:29:14 +0100
+Subject: PM / Sleep: Unify kerneldoc comments in kernel/power/suspend.c
+
+The kerneldoc comments in kernel/power/suspend.c are not formatted
+in the same way and the quality of some of them is questionable.
+Unify the formatting and improve the contents.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit 55ae451918ec62e553f11b6118fec157f90c31c3)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/suspend.c |   56 +++++++++++++++++++++++-------------------------
+ 1 file changed, 27 insertions(+), 29 deletions(-)
+
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 03bc92b..e6b5ef9 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -37,8 +37,8 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
+ static const struct platform_suspend_ops *suspend_ops;
+ /**
+- *    suspend_set_ops - Set the global suspend method table.
+- *    @ops:   Pointer to ops structure.
++ * suspend_set_ops - Set the global suspend method table.
++ * @ops: Suspend operations to use.
+  */
+ void suspend_set_ops(const struct platform_suspend_ops *ops)
+ {
+@@ -58,11 +58,11 @@ bool valid_state(suspend_state_t state)
+ }
+ /**
+- * suspend_valid_only_mem - generic memory-only valid callback
++ * suspend_valid_only_mem - Generic memory-only valid callback.
+  *
+- * Platform drivers that implement mem suspend only and only need
+- * to check for that in their .valid callback can use this instead
+- * of rolling their own .valid callback.
++ * Platform drivers that implement mem suspend only and only need to check for
++ * that in their .valid() callback can use this instead of rolling their own
++ * .valid() callback.
+  */
+ int suspend_valid_only_mem(suspend_state_t state)
+ {
+@@ -83,10 +83,11 @@ static int suspend_test(int level)
+ }
+ /**
+- *    suspend_prepare - Do prep work before entering low-power state.
++ * suspend_prepare - Prepare for entering system sleep state.
+  *
+- *    This is common code that is called for each state that we're entering.
+- *    Run suspend notifiers, allocate a console and stop all processes.
++ * Common code run for every system sleep state that can be entered (except for
++ * hibernation).  Run suspend notifiers, allocate the "suspend" console and
++ * freeze processes.
+  */
+ static int suspend_prepare(void)
+ {
+@@ -131,9 +132,9 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
+ }
+ /**
+- * suspend_enter - enter the desired system sleep state.
+- * @state: State to enter
+- * @wakeup: Returns information that suspend should not be entered again.
++ * suspend_enter - Make the system enter the given sleep state.
++ * @state: System sleep state to enter.
++ * @wakeup: Returns information that the sleep state should not be re-entered.
+  *
+  * This function should be called after devices have been suspended.
+  */
+@@ -199,9 +200,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
+ }
+ /**
+- *    suspend_devices_and_enter - suspend devices and enter the desired system
+- *                                sleep state.
+- *    @state:           state to enter
++ * suspend_devices_and_enter - Suspend devices and enter system sleep state.
++ * @state: System sleep state to enter.
+  */
+ int suspend_devices_and_enter(suspend_state_t state)
+ {
+@@ -251,10 +251,10 @@ int suspend_devices_and_enter(suspend_state_t state)
+ }
+ /**
+- *    suspend_finish - Do final work before exiting suspend sequence.
++ * suspend_finish - Clean up before finishing the suspend sequence.
+  *
+- *    Call platform code to clean up, restart processes, and free the
+- *    console that we've allocated. This is not called for suspend-to-disk.
++ * Call platform code to clean up, restart processes, and free the console that
++ * we've allocated. This routine is not called for hibernation.
+  */
+ static void suspend_finish(void)
+ {
+@@ -265,14 +265,12 @@ static void suspend_finish(void)
+ }
+ /**
+- *    enter_state - Do common work of entering low-power state.
+- *    @state:         pm_state structure for state we're entering.
++ * enter_state - Do common work needed to enter system sleep state.
++ * @state: System sleep state to enter.
+  *
+- *    Make sure we're the only ones trying to enter a sleep state. Fail
+- *    if someone has beat us to it, since we don't want anything weird to
+- *    happen when we wake up.
+- *    Then, do the setup for suspend, enter the state, and cleaup (after
+- *    we've woken up).
++ * Make sure that no one else is trying to put the system into a sleep state.
++ * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
++ * system enter the given sleep state and clean up after wakeup.
+  */
+ int enter_state(suspend_state_t state)
+ {
+@@ -310,11 +308,11 @@ int enter_state(suspend_state_t state)
+ }
+ /**
+- *    pm_suspend - Externally visible function for suspending system.
+- *    @state:         Enumerated value of state to enter.
++ * pm_suspend - Externally visible function for suspending the system.
++ * @state: System sleep state to enter.
+  *
+- *    Determine whether or not value is within range, get state
+- *    structure, and enter (above).
++ * Check if the value of @state represents one of the supported states,
++ * execute enter_state() and update system suspend statistics.
+  */
+ int pm_suspend(suspend_state_t state)
+ {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0193-PM-Sleep-Make-enter_state-in-kernel-power-suspend.c-.patch b/patches.runtime_pm/0193-PM-Sleep-Make-enter_state-in-kernel-power-suspend.c-.patch
new file mode 100644 (file)
index 0000000..32530fa
--- /dev/null
@@ -0,0 +1,75 @@
+From e63cf3569f9135e10ee055a63a7dcbeeb00c1678 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 13 Feb 2012 16:29:24 +0100
+Subject: PM / Sleep: Make enter_state() in kernel/power/suspend.c static
+
+The enter_state() function in kernel/power/suspend.c should be
+static and state_store() in kernel/power/suspend.c should call
+pm_suspend() instead of it, so make that happen (which also reduces
+code duplication related to suspend statistics).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit 93e1ee43a72b11e1b50aab87046c131a836a4456)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/main.c    |    8 +++-----
+ kernel/power/power.h   |    2 --
+ kernel/power/suspend.c |    2 +-
+ 3 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index b1e3248..1c12581 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -291,12 +291,10 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ #ifdef CONFIG_SUSPEND
+       for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
+-              if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
++              if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
++                      error = pm_suspend(state);
+                       break;
+-      }
+-      if (state < PM_SUSPEND_MAX && *s) {
+-              error = enter_state(state);
+-              suspend_stats_update(error);
++              }
+       }
+ #endif
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 398d42b..98f3622 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -177,13 +177,11 @@ extern const char *const pm_states[];
+ extern bool valid_state(suspend_state_t state);
+ extern int suspend_devices_and_enter(suspend_state_t state);
+-extern int enter_state(suspend_state_t state);
+ #else /* !CONFIG_SUSPEND */
+ static inline int suspend_devices_and_enter(suspend_state_t state)
+ {
+       return -ENOSYS;
+ }
+-static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
+ static inline bool valid_state(suspend_state_t state) { return false; }
+ #endif /* !CONFIG_SUSPEND */
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index e6b5ef9..4914358 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -272,7 +272,7 @@ static void suspend_finish(void)
+  * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
+  * system enter the given sleep state and clean up after wakeup.
+  */
+-int enter_state(suspend_state_t state)
++static int enter_state(suspend_state_t state)
+ {
+       int error;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0194-PM-Sleep-Drop-suspend_stats_update.patch b/patches.runtime_pm/0194-PM-Sleep-Drop-suspend_stats_update.patch
new file mode 100644 (file)
index 0000000..0d77fd1
--- /dev/null
@@ -0,0 +1,79 @@
+From 0574d62c60934e0ea28337532f8ea85a8de63068 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 13 Feb 2012 16:29:33 +0100
+Subject: PM / Sleep: Drop suspend_stats_update()
+
+Since suspend_stats_update() is only called from pm_suspend(),
+move its code directly into that function and remove the static
+inline definition from include/linux/suspend.h.  Clean_up
+pm_suspend() in the process.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+(cherry picked from commit bc25cf508942c56810d4fb623ef27b56ccef7783)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/suspend.h |   16 ----------------
+ kernel/power/suspend.c  |   18 ++++++++++++------
+ 2 files changed, 12 insertions(+), 22 deletions(-)
+
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index ec59fb0..43f0421 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -94,22 +94,6 @@ static inline void dpm_save_failed_step(enum suspend_stat_step step)
+ }
+ /**
+- * suspend_stats_update - Update success/failure statistics of suspend-to-ram
+- *
+- * @error: Value returned by enter_state() function
+- */
+-static inline void suspend_stats_update(int error)
+-{
+-      if (error) {
+-              suspend_stats.fail++;
+-              dpm_save_failed_errno(error);
+-      } else {
+-              suspend_stats.success++;
+-      }
+-}
+-
+-
+-/**
+  * struct platform_suspend_ops - Callbacks for managing platform dependent
+  *    system sleep states.
+  *
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 4914358..88e5c96 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -316,12 +316,18 @@ static int enter_state(suspend_state_t state)
+  */
+ int pm_suspend(suspend_state_t state)
+ {
+-      int ret;
+-      if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) {
+-              ret = enter_state(state);
+-              suspend_stats_update(ret);
+-              return ret;
++      int error;
++
++      if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
++              return -EINVAL;
++
++      error = enter_state(state);
++      if (error) {
++              suspend_stats.fail++;
++              dpm_save_failed_errno(error);
++      } else {
++              suspend_stats.success++;
+       }
+-      return -EINVAL;
++      return error;
+ }
+ EXPORT_SYMBOL(pm_suspend);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0195-PM-Add-comment-describing-relationships-between-PM-c.patch b/patches.runtime_pm/0195-PM-Add-comment-describing-relationships-between-PM-c.patch
new file mode 100644 (file)
index 0000000..1524824
--- /dev/null
@@ -0,0 +1,45 @@
+From 128b07acc4338764497410536d61c3e19056243d Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 13 Feb 2012 16:29:47 +0100
+Subject: PM: Add comment describing relationships between PM callbacks to
+ pm.h
+
+The UNIVERSAL_DEV_PM_OPS() macro is slightly misleading, because it
+may suggest that it's a good idea to point runtime PM callback
+pointers to the same routines as system suspend/resume callbacks
+.suspend() and .resume(), which is not the case.  For this reason,
+add a comment to include/linux/pm.h, next to the definition of
+UNIVERSAL_DEV_PM_OPS(), describing how device PM callbacks are
+related to each other.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c48825251cf5950da9d618144c4db6c130e6c0cd)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm.h |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 73c6105..d6dd6f6 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -320,6 +320,15 @@ const struct dev_pm_ops name = { \
+ /*
+  * Use this for defining a set of PM operations to be used in all situations
+  * (sustem suspend, hibernation or runtime PM).
++ * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
++ * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
++ * and .runtime_resume(), because .runtime_suspend() always works on an already
++ * quiescent device, while .suspend() should assume that the device may be doing
++ * something when it is called (it should ensure that the device will be
++ * quiescent after it has returned).  Therefore it's better to point the "late"
++ * suspend and "early" resume callback pointers, .suspend_late() and
++ * .resume_early(), to the same routines as .runtime_suspend() and
++ * .runtime_resume(), respectively (and analogously for hibernation).
+  */
+ #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ const struct dev_pm_ops name = { \
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0196-PM-Hibernate-print-physical-addresses-consistently-w.patch b/patches.runtime_pm/0196-PM-Hibernate-print-physical-addresses-consistently-w.patch
new file mode 100644 (file)
index 0000000..16bae61
--- /dev/null
@@ -0,0 +1,39 @@
+From 3ac512d1edc96811715f8e35d7e66bed2dc23f16 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Tue, 14 Feb 2012 22:20:52 +0100
+Subject: PM / Hibernate: print physical addresses consistently with other
+ parts of kernel
+
+Print physical address info in a style consistent with the %pR style used
+elsewhere in the kernel.
+
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 69f1d475cc80c55121852b3030873cdd407fd31c)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/snapshot.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 6a768e5..8e2e746 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -711,9 +711,10 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
+       list_for_each_entry(region, &nosave_regions, list) {
+               unsigned long pfn;
+-              pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
+-                              region->start_pfn << PAGE_SHIFT,
+-                              region->end_pfn << PAGE_SHIFT);
++              pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
++                       (unsigned long long) region->start_pfn << PAGE_SHIFT,
++                       ((unsigned long long) region->end_pfn << PAGE_SHIFT)
++                              - 1);
+               for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
+                       if (pfn_valid(pfn)) {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0197-PM-Sleep-Fix-possible-infinite-loop-during-wakeup-so.patch b/patches.runtime_pm/0197-PM-Sleep-Fix-possible-infinite-loop-during-wakeup-so.patch
new file mode 100644 (file)
index 0000000..caf02a3
--- /dev/null
@@ -0,0 +1,66 @@
+From 93978bb39092e1f4118fc414f7b0f817858c30ec Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 17 Feb 2012 23:39:20 +0100
+Subject: PM / Sleep: Fix possible infinite loop during wakeup source
+ destruction
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If wakeup_source_destroy() is called for an active wakeup source that
+is never deactivated, it will spin forever.  To prevent that from
+happening, make wakeup_source_destroy() call __pm_relax() for the
+wakeup source object it is about to free instead of waiting until
+it will be deactivated by someone else.  However, for this to work
+it also needs to make sure that the timer function will not be
+executed after the final __pm_relax(), so make it run
+del_timer_sync() on the wakeup source's timer beforehand.
+
+Additionally, update the kerneldoc comment to document the
+requirement that __pm_stay_awake() and __pm_wakeup_event() must not
+be run in parallel with wakeup_source_destroy().
+
+Reported-by: Arve HjønnevÃ¥g <arve@android.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit d94aff87826ee6aa43032f4c0263482913f4e2c8)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/wakeup.c |   15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 6e591a8..d279f46 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -74,22 +74,17 @@ EXPORT_SYMBOL_GPL(wakeup_source_create);
+ /**
+  * wakeup_source_destroy - Destroy a struct wakeup_source object.
+  * @ws: Wakeup source to destroy.
++ *
++ * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
++ * be run in parallel with this function for the same wakeup source object.
+  */
+ void wakeup_source_destroy(struct wakeup_source *ws)
+ {
+       if (!ws)
+               return;
+-      spin_lock_irq(&ws->lock);
+-      while (ws->active) {
+-              spin_unlock_irq(&ws->lock);
+-
+-              schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+-
+-              spin_lock_irq(&ws->lock);
+-      }
+-      spin_unlock_irq(&ws->lock);
+-
++      del_timer_sync(&ws->timer);
++      __pm_relax(ws);
+       kfree(ws->name);
+       kfree(ws);
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0198-PM-Sleep-Fix-race-conditions-related-to-wakeup-sourc.patch b/patches.runtime_pm/0198-PM-Sleep-Fix-race-conditions-related-to-wakeup-sourc.patch
new file mode 100644 (file)
index 0000000..21d7f46
--- /dev/null
@@ -0,0 +1,75 @@
+From ddd503dabe15f74329b9b5c13968ac4cd14bffe6 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Fri, 17 Feb 2012 23:39:33 +0100
+Subject: PM / Sleep: Fix race conditions related to wakeup source timer
+ function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If __pm_wakeup_event() has been used (with a nonzero timeout) to
+report a wakeup event and then __pm_relax() immediately followed by
+__pm_stay_awake() is called or __pm_wakeup_event() is called once
+again for the same wakeup source object before its timer expires, the
+timer function pm_wakeup_timer_fn() may still be run as a result of
+the previous __pm_wakeup_event() call.  In either of those cases it
+may mistakenly deactivate the wakeup source that has just been
+activated.
+
+To prevent that from happening, make wakeup_source_deactivate()
+clear the wakeup source's timer_expires field and make
+pm_wakeup_timer_fn() check if timer_expires is different from zero
+and if it's not in future before calling wakeup_source_deactivate()
+(if timer_expires is 0, it means that the timer has just been
+deleted and if timer_expires is in future, it means that the timer
+has just been rescheduled to a different time).
+
+Reported-by: Arve HjønnevÃ¥g <arve@android.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit da863cddd831b0f4bf2d067f8b75254f1be94590)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/wakeup.c |   16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index d279f46..b38bb9a 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -433,6 +433,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
+               ws->max_time = duration;
+       del_timer(&ws->timer);
++      ws->timer_expires = 0;
+       /*
+        * Increment the counter of registered wakeup events and decrement the
+@@ -487,11 +488,22 @@ EXPORT_SYMBOL_GPL(pm_relax);
+  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+  * @data: Address of the wakeup source object associated with the event source.
+  *
+- * Call __pm_relax() for the wakeup source whose address is stored in @data.
++ * Call wakeup_source_deactivate() for the wakeup source whose address is stored
++ * in @data if it is currently active and its timer has not been canceled and
++ * the expiration time of the timer is not in future.
+  */
+ static void pm_wakeup_timer_fn(unsigned long data)
+ {
+-      __pm_relax((struct wakeup_source *)data);
++      struct wakeup_source *ws = (struct wakeup_source *)data;
++      unsigned long flags;
++
++      spin_lock_irqsave(&ws->lock, flags);
++
++      if (ws->active && ws->timer_expires
++          && time_after_eq(jiffies, ws->timer_expires))
++              wakeup_source_deactivate(ws);
++
++      spin_unlock_irqrestore(&ws->lock, flags);
+ }
+ /**
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0199-PM-Sleep-Add-more-wakeup-source-initialization-routi.patch b/patches.runtime_pm/0199-PM-Sleep-Add-more-wakeup-source-initialization-routi.patch
new file mode 100644 (file)
index 0000000..a2d0894
--- /dev/null
@@ -0,0 +1,179 @@
+From b27f07ce7cda0ac82f2f9907c51ea41ddb8f62f2 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 21 Feb 2012 23:47:56 +0100
+Subject: PM / Sleep: Add more wakeup source initialization routines
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The existing wakeup source initialization routines are not
+particularly useful for wakeup sources that aren't created by
+wakeup_source_create(), because their users have to open code
+filling the objects with zeros and setting their names.  For this
+reason, introduce routines that can be used for initializing, for
+example, static wakeup source objects.
+
+Requested-by: Arve HjønnevÃ¥g <arve@android.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 8671bbc1bd0442ef0eab27f7d56216431c490820)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/wakeup.c |   50 +++++++++++++++++++++++++++++++++++--------
+ include/linux/pm_wakeup.h   |   22 ++++++++++++++++++-
+ 2 files changed, 62 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index b38bb9a..4ef3223 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data);
+ static LIST_HEAD(wakeup_sources);
+ /**
++ * wakeup_source_prepare - Prepare a new wakeup source for initialization.
++ * @ws: Wakeup source to prepare.
++ * @name: Pointer to the name of the new wakeup source.
++ *
++ * Callers must ensure that the @name string won't be freed when @ws is still in
++ * use.
++ */
++void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
++{
++      if (ws) {
++              memset(ws, 0, sizeof(*ws));
++              ws->name = name;
++      }
++}
++EXPORT_SYMBOL_GPL(wakeup_source_prepare);
++
++/**
+  * wakeup_source_create - Create a struct wakeup_source object.
+  * @name: Name of the new wakeup source.
+  */
+@@ -60,31 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name)
+ {
+       struct wakeup_source *ws;
+-      ws = kzalloc(sizeof(*ws), GFP_KERNEL);
++      ws = kmalloc(sizeof(*ws), GFP_KERNEL);
+       if (!ws)
+               return NULL;
+-      if (name)
+-              ws->name = kstrdup(name, GFP_KERNEL);
+-
++      wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
+       return ws;
+ }
+ EXPORT_SYMBOL_GPL(wakeup_source_create);
+ /**
+- * wakeup_source_destroy - Destroy a struct wakeup_source object.
+- * @ws: Wakeup source to destroy.
++ * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
++ * @ws: Wakeup source to prepare for destruction.
+  *
+  * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
+  * be run in parallel with this function for the same wakeup source object.
+  */
+-void wakeup_source_destroy(struct wakeup_source *ws)
++void wakeup_source_drop(struct wakeup_source *ws)
+ {
+       if (!ws)
+               return;
+       del_timer_sync(&ws->timer);
+       __pm_relax(ws);
++}
++EXPORT_SYMBOL_GPL(wakeup_source_drop);
++
++/**
++ * wakeup_source_destroy - Destroy a struct wakeup_source object.
++ * @ws: Wakeup source to destroy.
++ *
++ * Use only for wakeup source objects created with wakeup_source_create().
++ */
++void wakeup_source_destroy(struct wakeup_source *ws)
++{
++      if (!ws)
++              return;
++
++      wakeup_source_drop(ws);
+       kfree(ws->name);
+       kfree(ws);
+ }
+@@ -147,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register);
+  */
+ void wakeup_source_unregister(struct wakeup_source *ws)
+ {
+-      wakeup_source_remove(ws);
+-      wakeup_source_destroy(ws);
++      if (ws) {
++              wakeup_source_remove(ws);
++              wakeup_source_destroy(ws);
++      }
+ }
+ EXPORT_SYMBOL_GPL(wakeup_source_unregister);
+diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
+index a32da96..d9f0511 100644
+--- a/include/linux/pm_wakeup.h
++++ b/include/linux/pm_wakeup.h
+@@ -41,7 +41,7 @@
+  * @active: Status of the wakeup source.
+  */
+ struct wakeup_source {
+-      char                    *name;
++      const char              *name;
+       struct list_head        entry;
+       spinlock_t              lock;
+       struct timer_list       timer;
+@@ -73,7 +73,9 @@ static inline bool device_may_wakeup(struct device *dev)
+ }
+ /* drivers/base/power/wakeup.c */
++extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
+ extern struct wakeup_source *wakeup_source_create(const char *name);
++extern void wakeup_source_drop(struct wakeup_source *ws);
+ extern void wakeup_source_destroy(struct wakeup_source *ws);
+ extern void wakeup_source_add(struct wakeup_source *ws);
+ extern void wakeup_source_remove(struct wakeup_source *ws);
+@@ -103,11 +105,16 @@ static inline bool device_can_wakeup(struct device *dev)
+       return dev->power.can_wakeup;
+ }
++static inline void wakeup_source_prepare(struct wakeup_source *ws,
++                                       const char *name) {}
++
+ static inline struct wakeup_source *wakeup_source_create(const char *name)
+ {
+       return NULL;
+ }
++static inline void wakeup_source_drop(struct wakeup_source *ws) {}
++
+ static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
+ static inline void wakeup_source_add(struct wakeup_source *ws) {}
+@@ -165,4 +172,17 @@ static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+ #endif /* !CONFIG_PM_SLEEP */
++static inline void wakeup_source_init(struct wakeup_source *ws,
++                                    const char *name)
++{
++      wakeup_source_prepare(ws, name);
++      wakeup_source_add(ws);
++}
++
++static inline void wakeup_source_trash(struct wakeup_source *ws)
++{
++      wakeup_source_remove(ws);
++      wakeup_source_drop(ws);
++}
++
+ #endif /* _LINUX_PM_WAKEUP_H */
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0200-PM-Freezer-Remove-references-to-TIF_FREEZE-in-commen.patch b/patches.runtime_pm/0200-PM-Freezer-Remove-references-to-TIF_FREEZE-in-commen.patch
new file mode 100644 (file)
index 0000000..af02390
--- /dev/null
@@ -0,0 +1,80 @@
+From 17dc21d7e8b6f977d7bb35f7905a7be427e6c24e Mon Sep 17 00:00:00 2001
+From: Marcos Paulo de Souza <marcos.mage@gmail.com>
+Date: Tue, 21 Feb 2012 23:57:47 +0100
+Subject: PM / Freezer: Remove references to TIF_FREEZE in comments
+
+This patch removes all the references in the code about the TIF_FREEZE
+flag removed by commit a3201227f803ad7fd43180c5195dbe5a2bf998aa
+
+    freezer: make freezing() test freeze conditions in effect instead of TIF_FREEZE
+
+There still are some references to TIF_FREEZE in
+Documentation/power/freezing-of-tasks.txt, but it looks like that
+documentation needs more thorough work to reflect how the new
+freezer works, and hence merely removing the references to TIF_FREEZE
+won't really help. So I have not touched that part in this patch.
+
+Suggested-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Marcos Paulo de Souza <marcos.mage@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 37f08be11be9a7d9351fb1b9b408259519a126f3)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/exit.c          |    2 +-
+ kernel/freezer.c       |    6 +++---
+ kernel/power/process.c |    8 +++-----
+ 3 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 437d0cf..eb96623 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -437,7 +437,7 @@ void daemonize(const char *name, ...)
+        */
+       exit_mm(current);
+       /*
+-       * We don't want to have TIF_FREEZE set if the system-wide hibernation
++       * We don't want to get frozen, in case system-wide hibernation
+        * or suspend transition begins right now.
+        */
+       current->flags |= (PF_NOFREEZE | PF_KTHREAD);
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 9adeebe..82434ea 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -118,9 +118,9 @@ static void fake_signal_wake_up(struct task_struct *p)
+  * freeze_task - send a freeze request to given task
+  * @p: task to send the request to
+  *
+- * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+- * flag and either sending a fake signal to it or waking it up, depending
+- * on whether it has %PF_FREEZER_NOSIG set.
++ * If @p is freezing, the freeze request is sent either by sending a fake
++ * signal (if it's not a kernel thread) or waking it up (if it's a kernel
++ * thread).
+  *
+  * RETURNS:
+  * %false, if @p is not freezing or already frozen; %true, otherwise
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 6aeb5ef..0d2aeb2 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -53,11 +53,9 @@ static int try_to_freeze_tasks(bool user_only)
+                        * It is "frozen enough".  If the task does wake
+                        * up, it will immediately call try_to_freeze.
+                        *
+-                       * Because freeze_task() goes through p's
+-                       * scheduler lock after setting TIF_FREEZE, it's
+-                       * guaranteed that either we see TASK_RUNNING or
+-                       * try_to_stop() after schedule() in ptrace/signal
+-                       * stop sees TIF_FREEZE.
++                       * Because freeze_task() goes through p's scheduler lock, it's
++                       * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
++                       * transition can't race with task state testing here.
+                        */
+                       if (!task_is_stopped_or_traced(p) &&
+                           !freezer_should_skip(p))
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0201-PM-Domains-Fix-include-for-PM_GENERIC_DOMAINS-n-case.patch b/patches.runtime_pm/0201-PM-Domains-Fix-include-for-PM_GENERIC_DOMAINS-n-case.patch
new file mode 100644 (file)
index 0000000..8c17fb8
--- /dev/null
@@ -0,0 +1,59 @@
+From d0793693ca877b8614d64bcd70051351df4f6c44 Mon Sep 17 00:00:00 2001
+From: Magnus Damm <damm@opensource.se>
+Date: Sat, 25 Feb 2012 22:14:18 +0100
+Subject: PM / Domains: Fix include for PM_GENERIC_DOMAINS=n case
+
+Fix pm_genpd_init() arguments and make sure dev_gpd_data() and
+simple_qos_governor exist regardless of CONFIG_PM_GENERIC_DOMAINS
+setting.
+
+Signed-off-by: Magnus Damm <damm@opensource.se>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit b642631d38c28fefd1232a6b96713eb54b60130d)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_domain.h |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index e76cc9a..5c2bbc2 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -155,6 +155,10 @@ extern bool default_stop_ok(struct device *dev);
+ extern struct dev_power_governor pm_domain_always_on_gov;
+ #else
++static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
++{
++      return ERR_PTR(-ENOSYS);
++}
+ static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+ {
+       return ERR_PTR(-ENOSYS);
+@@ -195,7 +199,8 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+ {
+       return -ENOSYS;
+ }
+-static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
++static inline void pm_genpd_init(struct generic_pm_domain *genpd,
++                               struct dev_power_governor *gov, bool is_off)
+ {
+ }
+ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+@@ -206,11 +211,8 @@ static inline bool default_stop_ok(struct device *dev)
+ {
+       return false;
+ }
++#define simple_qos_governor NULL
+ #define pm_domain_always_on_gov NULL
+-static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+-{
+-      return NULL;
+-}
+ #endif
+ static inline int pm_genpd_remove_callbacks(struct device *dev)
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0202-PM-QoS-Make-it-possible-to-expose-PM-QoS-latency-con.patch b/patches.runtime_pm/0202-PM-QoS-Make-it-possible-to-expose-PM-QoS-latency-con.patch
new file mode 100644 (file)
index 0000000..8a9f983
--- /dev/null
@@ -0,0 +1,282 @@
+From 93d69cf1fff51419dd636b72da27e92c62638682 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 13 Mar 2012 01:01:39 +0100
+Subject: PM / QoS: Make it possible to expose PM QoS latency constraints
+
+A runtime suspend of a device (e.g. an MMC controller) belonging to
+a power domain or, in a more complicated scenario, a runtime suspend
+of another device in the same power domain, may cause power to be
+removed from the entire domain.  In that case, the amount of time
+necessary to runtime-resume the given device (e.g. the MMC
+controller) is often substantially greater than the time needed to
+run its driver's runtime resume callback.  That may hurt performance
+in some situations, because user data may need to wait for the
+device to become operational, so we should make it possible to
+prevent that from happening.
+
+For this reason, introduce a new sysfs attribute for devices,
+power/pm_qos_resume_latency_us, allowing user space to specify the
+upper bound of the time necessary to bring the (runtime-suspended)
+device up after the resume of it has been requested.  However, make
+that attribute appear only for the devices whose drivers declare
+support for it by calling the (new) dev_pm_qos_expose_latency_limit()
+helper function with the appropriate initial value of the attribute.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reviewed-by: Kevin Hilman <khilman@ti.com>
+Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Acked-by: Linus Walleij <linus.walleij@linaro.org>
+(cherry picked from commit 85dc0b8a4019e38ad4fd0c008f89a5c241805ac2)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ Documentation/ABI/testing/sysfs-devices-power |   18 ++++++++
+ drivers/base/power/power.h                    |    4 ++
+ drivers/base/power/qos.c                      |   61 +++++++++++++++++++++++++
+ drivers/base/power/sysfs.c                    |   47 +++++++++++++++++++
+ include/linux/pm.h                            |    1 +
+ include/linux/pm_qos.h                        |    9 ++++
+ 6 files changed, 140 insertions(+)
+
+diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
+index 8ffbc25..840f7d6 100644
+--- a/Documentation/ABI/testing/sysfs-devices-power
++++ b/Documentation/ABI/testing/sysfs-devices-power
+@@ -165,3 +165,21 @@ Description:
+               Not all drivers support this attribute.  If it isn't supported,
+               attempts to read or write it will yield I/O errors.
++
++What:         /sys/devices/.../power/pm_qos_latency_us
++Date:         March 2012
++Contact:      Rafael J. Wysocki <rjw@sisk.pl>
++Description:
++              The /sys/devices/.../power/pm_qos_resume_latency_us attribute
++              contains the PM QoS resume latency limit for the given device,
++              which is the maximum allowed time it can take to resume the
++              device, after it has been suspended at run time, from a resume
++              request to the moment the device will be ready to process I/O,
++              in microseconds.  If it is equal to 0, however, this means that
++              the PM QoS resume latency may be arbitrary.
++
++              Not all drivers support this attribute.  If it isn't supported,
++              it is not present.
++
++              This attribute has no effect on system-wide suspend/resume and
++              hibernation.
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index 9bf6232..eeb4bff 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev);
+ extern void rpm_sysfs_remove(struct device *dev);
+ extern int wakeup_sysfs_add(struct device *dev);
+ extern void wakeup_sysfs_remove(struct device *dev);
++extern int pm_qos_sysfs_add(struct device *dev);
++extern void pm_qos_sysfs_remove(struct device *dev);
+ #else /* CONFIG_PM */
+@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {}
+ static inline void rpm_sysfs_remove(struct device *dev) {}
+ static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
+ static inline void wakeup_sysfs_remove(struct device *dev) {}
++static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
++static inline void pm_qos_sysfs_remove(struct device *dev) {}
+ #endif
+diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
+index c5d3588..7185557 100644
+--- a/drivers/base/power/qos.c
++++ b/drivers/base/power/qos.c
+@@ -41,6 +41,7 @@
+ #include <linux/mutex.h>
+ #include <linux/export.h>
++#include "power.h"
+ static DEFINE_MUTEX(dev_pm_qos_mtx);
+@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
+       struct dev_pm_qos_request *req, *tmp;
+       struct pm_qos_constraints *c;
++      /*
++       * If the device's PM QoS resume latency limit has been exposed to user
++       * space, it has to be hidden at this point.
++       */
++      dev_pm_qos_hide_latency_limit(dev);
++
+       mutex_lock(&dev_pm_qos_mtx);
+       dev->power.power_state = PMSG_INVALID;
+@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
+       return error;
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
++
++#ifdef CONFIG_PM_RUNTIME
++static void __dev_pm_qos_drop_user_request(struct device *dev)
++{
++      dev_pm_qos_remove_request(dev->power.pq_req);
++      dev->power.pq_req = 0;
++}
++
++/**
++ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
++ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
++ * @value: Initial value of the latency limit.
++ */
++int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
++{
++      struct dev_pm_qos_request *req;
++      int ret;
++
++      if (!device_is_registered(dev) || value < 0)
++              return -EINVAL;
++
++      if (dev->power.pq_req)
++              return -EEXIST;
++
++      req = kzalloc(sizeof(*req), GFP_KERNEL);
++      if (!req)
++              return -ENOMEM;
++
++      ret = dev_pm_qos_add_request(dev, req, value);
++      if (ret < 0)
++              return ret;
++
++      dev->power.pq_req = req;
++      ret = pm_qos_sysfs_add(dev);
++      if (ret)
++              __dev_pm_qos_drop_user_request(dev);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
++
++/**
++ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
++ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
++ */
++void dev_pm_qos_hide_latency_limit(struct device *dev)
++{
++      if (dev->power.pq_req) {
++              pm_qos_sysfs_remove(dev);
++              __dev_pm_qos_drop_user_request(dev);
++      }
++}
++EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
++#endif /* CONFIG_PM_RUNTIME */
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index ac63d48..7f3d5d8 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -5,6 +5,7 @@
+ #include <linux/device.h>
+ #include <linux/string.h>
+ #include <linux/export.h>
++#include <linux/pm_qos.h>
+ #include <linux/pm_runtime.h>
+ #include <asm/atomic.h>
+ #include <linux/jiffies.h>
+@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
+ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
+               autosuspend_delay_ms_store);
++static ssize_t pm_qos_latency_show(struct device *dev,
++                                 struct device_attribute *attr, char *buf)
++{
++      return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
++}
++
++static ssize_t pm_qos_latency_store(struct device *dev,
++                                  struct device_attribute *attr,
++                                  const char *buf, size_t n)
++{
++      s32 value;
++      int ret;
++
++      if (kstrtos32(buf, 0, &value))
++              return -EINVAL;
++
++      if (value < 0)
++              return -EINVAL;
++
++      ret = dev_pm_qos_update_request(dev->power.pq_req, value);
++      return ret < 0 ? ret : n;
++}
++
++static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
++                 pm_qos_latency_show, pm_qos_latency_store);
+ #endif /* CONFIG_PM_RUNTIME */
+ #ifdef CONFIG_PM_SLEEP
+@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = {
+       .attrs  = runtime_attrs,
+ };
++static struct attribute *pm_qos_attrs[] = {
++#ifdef CONFIG_PM_RUNTIME
++      &dev_attr_pm_qos_resume_latency_us.attr,
++#endif /* CONFIG_PM_RUNTIME */
++      NULL,
++};
++static struct attribute_group pm_qos_attr_group = {
++      .name   = power_group_name,
++      .attrs  = pm_qos_attrs,
++};
++
+ int dpm_sysfs_add(struct device *dev)
+ {
+       int rc;
+@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev)
+       sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ }
++int pm_qos_sysfs_add(struct device *dev)
++{
++      return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
++}
++
++void pm_qos_sysfs_remove(struct device *dev)
++{
++      sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
++}
++
+ void rpm_sysfs_remove(struct device *dev)
+ {
+       sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index d6dd6f6..715305e 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -546,6 +546,7 @@ struct dev_pm_info {
+       unsigned long           accounting_timestamp;
+       ktime_t                 suspend_time;
+       s64                     max_time_suspended_ns;
++      struct dev_pm_qos_request *pq_req;
+ #endif
+       struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
+       struct pm_qos_constraints *constraints;
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index c8a541e..2e9191a 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -137,4 +137,13 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+                       { return 0; }
+ #endif
++#ifdef CONFIG_PM_RUNTIME
++int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
++void dev_pm_qos_hide_latency_limit(struct device *dev);
++#else
++static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
++                      { return 0; }
++static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
++#endif
++
+ #endif
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0203-PM-Domains-Fix-handling-of-wakeup-devices-during-sys.patch b/patches.runtime_pm/0203-PM-Domains-Fix-handling-of-wakeup-devices-during-sys.patch
new file mode 100644 (file)
index 0000000..42d5844
--- /dev/null
@@ -0,0 +1,51 @@
+From 6993891333e6d261a80c7ef73066d993f473c4da Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 13 Mar 2012 22:39:31 +0100
+Subject: PM / Domains: Fix handling of wakeup devices during system resume
+
+During system suspend pm_genpd_suspend_noirq() checks if the given
+device is in a wakeup path (i.e. it appears to be needed for one or
+more wakeup devices to work or is a wakeup device itself) and if it
+needs to be "active" for wakeup to work.  If that is the case, the
+function returns 0 without incrementing the device domain's counter
+of suspended devices and without executing genpd_stop_dev() for the
+device.  In consequence, the device is not stopped (e.g. its clock
+isn't disabled) and power is always supplied to its domain in the
+resulting system sleep state.
+
+However, pm_genpd_resume_noirq() doesn't repeat that check and it
+runs genpd_start_dev() and decrements the domain's counter of
+suspended devices even for the wakeup device that weren't stopped by
+pm_genpd_suspend_noirq().  As a result, the start callback may be run
+unnecessarily for them and their domains' counters of suspended
+devices may become negative.  Both outcomes aren't desirable, so fix
+pm_genpd_resume_noirq() to look for wakeup devices that might not be
+stopped by during system suspend.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Tested-by: Simon Horman <horms@verge.net.au>
+Cc: stable@vger.kernel.org
+(cherry picked from commit cc85b20780562d404e18a47b9b55b4a5102ae53e)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index d2c0323..e79228c 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -890,7 +890,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->suspend_power_off)
++      if (genpd->suspend_power_off
++          || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+               return 0;
+       /*
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0204-PM-Domains-Fix-hibernation-restore-of-devices-v2.patch b/patches.runtime_pm/0204-PM-Domains-Fix-hibernation-restore-of-devices-v2.patch
new file mode 100644 (file)
index 0000000..a8b4e47
--- /dev/null
@@ -0,0 +1,97 @@
+From beb3fd5a129cc47ded8c0b7ece93597d6188ad91 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 13 Mar 2012 22:39:37 +0100
+Subject: PM / Domains: Fix hibernation restore of devices, v2
+
+During resume from hibernation pm_genpd_restore_noirq() should only
+power off domains whose suspend_power_off flags are set once and
+not every time it is called for a device in the given domain.
+Moreover, it shouldn't decrement genpd->suspended_count, because
+that field is not touched during device freezing and therefore it is
+always equal to 0 when pm_genpd_restore_noirq() runs for the first
+device in the given domain.
+
+This means pm_genpd_restore_noirq() may use genpd->suspended_count
+to determine whether or not it it has been called for the domain in
+question already in this cycle (it only needs to increment that
+field every time it runs for this purpose) and whether or not it
+should check if the domain needs to be powered off.  For that to
+work, though, pm_genpd_prepare() has to clear genpd->suspended_count
+when it runs for the first device in the given domain (in which case
+that flag need not be cleared during domain initialization).
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Cc: stable@vger.kernel.org
+(cherry picked from commit 65533bbf63b4f37723fdfedc73d0653958973323)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index e79228c..84f4bee 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -764,8 +764,10 @@ static int pm_genpd_prepare(struct device *dev)
+       genpd_acquire_lock(genpd);
+-      if (genpd->prepared_count++ == 0)
++      if (genpd->prepared_count++ == 0) {
++              genpd->suspended_count = 0;
+               genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
++      }
+       genpd_release_lock(genpd);
+@@ -1097,20 +1099,30 @@ static int pm_genpd_restore_noirq(struct device *dev)
+        * Since all of the "noirq" callbacks are executed sequentially, it is
+        * guaranteed that this function will never run twice in parallel for
+        * the same PM domain, so it is not necessary to use locking here.
++       *
++       * At this point suspended_count == 0 means we are being run for the
++       * first time for the given domain in the present cycle.
+        */
+-      genpd->status = GPD_STATE_POWER_OFF;
+-      if (genpd->suspend_power_off) {
++      if (genpd->suspended_count++ == 0) {
+               /*
+-               * The boot kernel might put the domain into the power on state,
+-               * so make sure it really is powered off.
++               * The boot kernel might put the domain into arbitrary state,
++               * so make it appear as powered off to pm_genpd_poweron(), so
++               * that it tries to power it on in case it was really off.
+                */
+-              if (genpd->power_off)
+-                      genpd->power_off(genpd);
+-              return 0;
++              genpd->status = GPD_STATE_POWER_OFF;
++              if (genpd->suspend_power_off) {
++                      /*
++                       * If the domain was off before the hibernation, make
++                       * sure it will be off going forward.
++                       */
++                      if (genpd->power_off)
++                              genpd->power_off(genpd);
++
++                      return 0;
++              }
+       }
+       pm_genpd_poweron(genpd);
+-      genpd->suspended_count--;
+       return genpd_start_dev(genpd, dev);
+ }
+@@ -1649,7 +1661,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
+       genpd->poweroff_task = NULL;
+       genpd->resume_count = 0;
+       genpd->device_count = 0;
+-      genpd->suspended_count = 0;
+       genpd->max_off_time_ns = -1;
+       genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+       genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0205-PM-Domains-Introduce-always-on-device-flag.patch b/patches.runtime_pm/0205-PM-Domains-Introduce-always-on-device-flag.patch
new file mode 100644 (file)
index 0000000..ec38c85
--- /dev/null
@@ -0,0 +1,155 @@
+From d3ade56cdfb6d8f10e32198ba9f998396189a885 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Tue, 13 Mar 2012 22:39:48 +0100
+Subject: PM / Domains: Introduce "always on" device flag
+
+The TMU device on the Mackerel board belongs to the A4R power domain
+and loses power when the domain is turned off.  Unfortunately, the
+TMU driver is not prepared to cope with such situations and crashes
+the system when that happens.  To work around this problem introduce
+a new helper function, pm_genpd_dev_always_on(), allowing a device
+driver to mark its device as "always on" in case it belongs to a PM
+domain, which will make the generic PM domains core code avoid
+powering off the domain containing the device, both at run time and
+during system suspend.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Tested-by: Simon Horman <horms@verge.net.au>
+Acked-by: Paul Mundt <lethal@linux-sh.org>
+Cc: stable@vger.kernel.org
+(cherry picked from commit 1e78a0c7fc92aee076965d516cf54475c39e9894)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |   37 +++++++++++++++++++++++++++++++------
+ include/linux/pm_domain.h   |    3 +++
+ 2 files changed, 34 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 84f4bee..b6ff6ec 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+       not_suspended = 0;
+       list_for_each_entry(pdd, &genpd->dev_list, list_node)
+               if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+-                  || pdd->dev->power.irq_safe))
++                  || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+                       not_suspended++;
+       if (not_suspended > genpd->in_progress)
+@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
+       might_sleep_if(!genpd->dev_irq_safe);
++      if (dev_gpd_data(dev)->always_on)
++              return -EBUSY;
++
+       stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+       if (stop_ok && !stop_ok(dev))
+               return -EBUSY;
+@@ -859,7 +862,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->suspend_power_off
++      if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+           || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+               return 0;
+@@ -892,7 +895,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      if (genpd->suspend_power_off
++      if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+           || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+               return 0;
+@@ -1012,7 +1015,8 @@ static int pm_genpd_freeze_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
++      return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
++              0 : genpd_stop_dev(genpd, dev);
+ }
+ /**
+@@ -1032,7 +1036,8 @@ static int pm_genpd_thaw_noirq(struct device *dev)
+       if (IS_ERR(genpd))
+               return -EINVAL;
+-      return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
++      return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
++              0 : genpd_start_dev(genpd, dev);
+ }
+ /**
+@@ -1124,7 +1129,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
+       pm_genpd_poweron(genpd);
+-      return genpd_start_dev(genpd, dev);
++      return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+ }
+ /**
+@@ -1320,6 +1325,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ }
+ /**
++ * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
++ * @dev: Device to set/unset the flag for.
++ * @val: The new value of the device's "always on" flag.
++ */
++void pm_genpd_dev_always_on(struct device *dev, bool val)
++{
++      struct pm_subsys_data *psd;
++      unsigned long flags;
++
++      spin_lock_irqsave(&dev->power.lock, flags);
++
++      psd = dev_to_psd(dev);
++      if (psd && psd->domain_data)
++              to_gpd_data(psd->domain_data)->always_on = val;
++
++      spin_unlock_irqrestore(&dev->power.lock, flags);
++}
++EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
++
++/**
+  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+  * @genpd: Master PM domain to add the subdomain to.
+  * @subdomain: Subdomain to be added.
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 5c2bbc2..1236d26 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -99,6 +99,7 @@ struct generic_pm_domain_data {
+       struct gpd_dev_ops ops;
+       struct gpd_timing_data td;
+       bool need_restore;
++      bool always_on;
+ };
+ #ifdef CONFIG_PM_GENERIC_DOMAINS
+@@ -137,6 +138,7 @@ static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
+ extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+                                 struct device *dev);
++extern void pm_genpd_dev_always_on(struct device *dev, bool val);
+ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+                                 struct generic_pm_domain *new_subdomain);
+ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+@@ -179,6 +181,7 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ {
+       return -ENOSYS;
+ }
++static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {}
+ static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+                                        struct generic_pm_domain *new_sd)
+ {
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0206-PM-Domains-Check-domain-status-during-hibernation-re.patch b/patches.runtime_pm/0206-PM-Domains-Check-domain-status-during-hibernation-re.patch
new file mode 100644 (file)
index 0000000..57cb29b
--- /dev/null
@@ -0,0 +1,40 @@
+From 264e68cf49260d40ee819c43c38fe763c4955442 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 19 Mar 2012 10:38:14 +0100
+Subject: PM / Domains: Check domain status during hibernation restore of
+ devices
+
+Power domains that were off before hibernation shouldn't be turned on
+during device restore, so prevent that from happening.
+
+This change fixes up commit 65533bbf63b4f37723fdfedc73d0653958973323
+
+    PM / Domains: Fix hibernation restore of devices, v2
+
+that didn't include it by mistake.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit 18dd2ece3cde14cfd42e95a89eb14016699a5f15)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/domain.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index b6ff6ec..73ce9fb 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1127,6 +1127,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
+               }
+       }
++      if (genpd->suspend_power_off)
++              return 0;
++
+       pm_genpd_poweron(genpd);
+       return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0207-PM-Runtime-don-t-forget-to-wake-up-waitqueue-on-fail.patch b/patches.runtime_pm/0207-PM-Runtime-don-t-forget-to-wake-up-waitqueue-on-fail.patch
new file mode 100644 (file)
index 0000000..0704462
--- /dev/null
@@ -0,0 +1,49 @@
+From d8f3d391ef39a0fd57c67ecd770d3c7ac48294a1 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Mon, 26 Mar 2012 22:46:52 +0200
+Subject: PM / Runtime: don't forget to wake up waitqueue on failure
+
+This patch (as1535) fixes a bug in the runtime PM core.  When a
+runtime suspend attempt completes, whether successfully or not, the
+device's power.wait_queue is supposed to be signalled.  But this
+doesn't happen in the failure pathway of rpm_suspend() when another
+autosuspend attempt is rescheduled.  As a result, a task can get stuck
+indefinitely on the wait queue (I have seen this happen in testing).
+
+The patch fixes the problem by moving the wake_up_all() call up near
+the start of the failure code.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+CC: <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit f2791d733a2f06997b573d1a3cfde21e6f529826)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ drivers/base/power/runtime.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 541f821..bd0f394 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -532,6 +532,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       dev->power.suspend_time = ktime_set(0, 0);
+       dev->power.max_time_suspended_ns = -1;
+       dev->power.deferred_resume = false;
++      wake_up_all(&dev->power.wait_queue);
++
+       if (retval == -EAGAIN || retval == -EBUSY) {
+               dev->power.runtime_error = 0;
+@@ -547,7 +549,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+       } else {
+               pm_runtime_cancel_pending(dev);
+       }
+-      wake_up_all(&dev->power.wait_queue);
+       goto out;
+ }
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0208-PM-Hibernate-Disable-usermode-helpers-right-before-f.patch b/patches.runtime_pm/0208-PM-Hibernate-Disable-usermode-helpers-right-before-f.patch
new file mode 100644 (file)
index 0000000..a1cbfb4
--- /dev/null
@@ -0,0 +1,94 @@
+From e6b0b85e018ebd712c50402d9850afe725edc618 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 28 Mar 2012 23:30:14 +0200
+Subject: PM / Hibernate: Disable usermode helpers right before freezing tasks
+
+There is no reason to call usermodehelper_disable() before creating
+memory bitmaps in hibernate() and software_resume(), so call it right
+before freeze_processes(), in accordance with the other suspend and
+hibernation code.  Consequently, call usermodehelper_enable() right
+after the thawing of tasks rather than after freeing the memory
+bitmaps.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org
+(cherry picked from commit 7b5179ac14dbad945647ac9e76bbbf14ed9e0dbe)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 0a186cf..639ff6e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -611,19 +611,19 @@ int hibernate(void)
+       if (error)
+               goto Exit;
+-      error = usermodehelper_disable();
+-      if (error)
+-              goto Exit;
+-
+       /* Allocate memory management structures */
+       error = create_basic_memory_bitmaps();
+       if (error)
+-              goto Enable_umh;
++              goto Exit;
+       printk(KERN_INFO "PM: Syncing filesystems ... ");
+       sys_sync();
+       printk("done.\n");
++      error = usermodehelper_disable();
++      if (error)
++              goto Exit;
++
+       error = freeze_processes();
+       if (error)
+               goto Free_bitmaps;
+@@ -660,9 +660,8 @@ int hibernate(void)
+       freezer_test_done = false;
+  Free_bitmaps:
+-      free_basic_memory_bitmaps();
+- Enable_umh:
+       usermodehelper_enable();
++      free_basic_memory_bitmaps();
+  Exit:
+       pm_notifier_call_chain(PM_POST_HIBERNATION);
+       pm_restore_console();
+@@ -777,15 +776,13 @@ static int software_resume(void)
+       if (error)
+               goto close_finish;
+-      error = usermodehelper_disable();
++      error = create_basic_memory_bitmaps();
+       if (error)
+               goto close_finish;
+-      error = create_basic_memory_bitmaps();
+-      if (error) {
+-              usermodehelper_enable();
++      error = usermodehelper_disable();
++      if (error)
+               goto close_finish;
+-      }
+       pr_debug("PM: Preparing processes for restore.\n");
+       error = freeze_processes();
+@@ -805,8 +802,8 @@ static int software_resume(void)
+       swsusp_free();
+       thaw_processes();
+  Done:
+-      free_basic_memory_bitmaps();
+       usermodehelper_enable();
++      free_basic_memory_bitmaps();
+  Finish:
+       pm_notifier_call_chain(PM_POST_RESTORE);
+       pm_restore_console();
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0209-PM-Sleep-Move-disabling-of-usermode-helpers-to-the-f.patch b/patches.runtime_pm/0209-PM-Sleep-Move-disabling-of-usermode-helpers-to-the-f.patch
new file mode 100644 (file)
index 0000000..242b046
--- /dev/null
@@ -0,0 +1,188 @@
+From af996e4f80cea15bc92578f2dbc358e29cc6e9c0 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Wed, 28 Mar 2012 23:30:21 +0200
+Subject: PM / Sleep: Move disabling of usermode helpers to the freezer
+
+The core suspend/hibernation code calls usermodehelper_disable() to
+avoid race conditions between the freezer and the starting of
+usermode helpers and each code path has to do that on its own.
+However, it is always called right before freeze_processes()
+and usermodehelper_enable() is always called right after
+thaw_processes().  For this reason, to avoid code duplication and
+to make the connection between usermodehelper_disable() and the
+freezer more visible, make freeze_processes() call it and remove the
+direct usermodehelper_disable() and usermodehelper_enable() calls
+from all suspend/hibernation code paths.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org
+(cherry picked from commit 1e73203cd1157a03facc41ffb54050f5b28e55bd)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ kernel/power/hibernate.c |   11 -----------
+ kernel/power/process.c   |    7 +++++++
+ kernel/power/suspend.c   |    7 -------
+ kernel/power/user.c      |   10 +---------
+ 4 files changed, 8 insertions(+), 27 deletions(-)
+
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 639ff6e..e09dfbf 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -16,7 +16,6 @@
+ #include <linux/string.h>
+ #include <linux/device.h>
+ #include <linux/async.h>
+-#include <linux/kmod.h>
+ #include <linux/delay.h>
+ #include <linux/fs.h>
+ #include <linux/mount.h>
+@@ -620,10 +619,6 @@ int hibernate(void)
+       sys_sync();
+       printk("done.\n");
+-      error = usermodehelper_disable();
+-      if (error)
+-              goto Exit;
+-
+       error = freeze_processes();
+       if (error)
+               goto Free_bitmaps;
+@@ -660,7 +655,6 @@ int hibernate(void)
+       freezer_test_done = false;
+  Free_bitmaps:
+-      usermodehelper_enable();
+       free_basic_memory_bitmaps();
+  Exit:
+       pm_notifier_call_chain(PM_POST_HIBERNATION);
+@@ -780,10 +774,6 @@ static int software_resume(void)
+       if (error)
+               goto close_finish;
+-      error = usermodehelper_disable();
+-      if (error)
+-              goto close_finish;
+-
+       pr_debug("PM: Preparing processes for restore.\n");
+       error = freeze_processes();
+       if (error) {
+@@ -802,7 +792,6 @@ static int software_resume(void)
+       swsusp_free();
+       thaw_processes();
+  Done:
+-      usermodehelper_enable();
+       free_basic_memory_bitmaps();
+  Finish:
+       pm_notifier_call_chain(PM_POST_RESTORE);
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 0d2aeb2..56eaac7 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -16,6 +16,7 @@
+ #include <linux/freezer.h>
+ #include <linux/delay.h>
+ #include <linux/workqueue.h>
++#include <linux/kmod.h>
+ /* 
+  * Timeout for stopping processes
+@@ -122,6 +123,10 @@ int freeze_processes(void)
+ {
+       int error;
++      error = usermodehelper_disable();
++      if (error)
++              return error;
++
+       if (!pm_freezing)
+               atomic_inc(&system_freezing_cnt);
+@@ -187,6 +192,8 @@ void thaw_processes(void)
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
++      usermodehelper_enable();
++
+       schedule();
+       printk("done.\n");
+ }
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 88e5c96..396d262 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -12,7 +12,6 @@
+ #include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+-#include <linux/kmod.h>
+ #include <linux/console.h>
+ #include <linux/cpu.h>
+ #include <linux/syscalls.h>
+@@ -102,17 +101,12 @@ static int suspend_prepare(void)
+       if (error)
+               goto Finish;
+-      error = usermodehelper_disable();
+-      if (error)
+-              goto Finish;
+-
+       error = suspend_freeze_processes();
+       if (!error)
+               return 0;
+       suspend_stats.failed_freeze++;
+       dpm_save_failed_step(SUSPEND_FREEZE);
+-      usermodehelper_enable();
+  Finish:
+       pm_notifier_call_chain(PM_POST_SUSPEND);
+       pm_restore_console();
+@@ -259,7 +253,6 @@ int suspend_devices_and_enter(suspend_state_t state)
+ static void suspend_finish(void)
+ {
+       suspend_thaw_processes();
+-      usermodehelper_enable();
+       pm_notifier_call_chain(PM_POST_SUSPEND);
+       pm_restore_console();
+ }
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 33c4329..91b0fd0 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -12,7 +12,6 @@
+ #include <linux/suspend.h>
+ #include <linux/syscalls.h>
+ #include <linux/reboot.h>
+-#include <linux/kmod.h>
+ #include <linux/string.h>
+ #include <linux/device.h>
+ #include <linux/miscdevice.h>
+@@ -222,14 +221,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+               sys_sync();
+               printk("done.\n");
+-              error = usermodehelper_disable();
+-              if (error)
+-                      break;
+-
+               error = freeze_processes();
+-              if (error)
+-                      usermodehelper_enable();
+-              else
++              if (!error)
+                       data->frozen = 1;
+               break;
+@@ -238,7 +231,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+                       break;
+               pm_restore_gfp_mask();
+               thaw_processes();
+-              usermodehelper_enable();
+               data->frozen = 0;
+               break;
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/patches.runtime_pm/0210-PM-QoS-add-pm_qos_update_request_timeout-API.patch b/patches.runtime_pm/0210-PM-QoS-add-pm_qos_update_request_timeout-API.patch
new file mode 100644 (file)
index 0000000..65d5640
--- /dev/null
@@ -0,0 +1,150 @@
+From 2f11aa1bec43368e9e6d8706d18b3dbb5c5f7569 Mon Sep 17 00:00:00 2001
+From: MyungJoo Ham <myungjoo.ham@samsung.com>
+Date: Wed, 28 Mar 2012 23:31:24 +0200
+Subject: PM / QoS: add pm_qos_update_request_timeout() API
+
+The new API, pm_qos_update_request_timeout() is to provide a timeout
+with pm_qos_update_request.
+
+For example, pm_qos_update_request_timeout(req, 100, 1000), means that
+QoS request on req with value 100 will be active for 1000 microseconds.
+After 1000 microseconds, the QoS request thru req is reset. If there
+were another pm_qos_update_request(req, x) during the 1000 us, this
+new request with value x will override as this is another request on the
+same req handle. A new request on the same req handle will always
+override the previous request whether it is the conventional request or
+it is the new timeout request.
+
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Acked-by: Mark Gross <markgross@thegnar.org>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+(cherry picked from commit c4772d192c70b61d52262b0db76f7abd8aeb51c6)
+
+Signed-off-by: Simon Horman <horms@verge.net.au>
+---
+ include/linux/pm_qos.h |    4 ++++
+ kernel/power/qos.c     |   50 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 54 insertions(+)
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 2e9191a..233149c 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -8,6 +8,7 @@
+ #include <linux/notifier.h>
+ #include <linux/miscdevice.h>
+ #include <linux/device.h>
++#include <linux/workqueue.h>
+ enum {
+       PM_QOS_RESERVED = 0,
+@@ -29,6 +30,7 @@ enum {
+ struct pm_qos_request {
+       struct plist_node node;
+       int pm_qos_class;
++      struct delayed_work work; /* for pm_qos_update_request_timeout */
+ };
+ struct dev_pm_qos_request {
+@@ -73,6 +75,8 @@ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+                       s32 value);
+ void pm_qos_update_request(struct pm_qos_request *req,
+                          s32 new_value);
++void pm_qos_update_request_timeout(struct pm_qos_request *req,
++                                 s32 new_value, unsigned long timeout_us);
+ void pm_qos_remove_request(struct pm_qos_request *req);
+ int pm_qos_request(int pm_qos_class);
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index d6d6dbd..6a031e6 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -230,6 +230,21 @@ int pm_qos_request_active(struct pm_qos_request *req)
+ EXPORT_SYMBOL_GPL(pm_qos_request_active);
+ /**
++ * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
++ * @work: work struct for the delayed work (timeout)
++ *
++ * This cancels the timeout request by falling back to the default at timeout.
++ */
++static void pm_qos_work_fn(struct work_struct *work)
++{
++      struct pm_qos_request *req = container_of(to_delayed_work(work),
++                                                struct pm_qos_request,
++                                                work);
++
++      pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
++}
++
++/**
+  * pm_qos_add_request - inserts new qos request into the list
+  * @req: pointer to a preallocated handle
+  * @pm_qos_class: identifies which list of qos request to use
+@@ -253,6 +268,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
+               return;
+       }
+       req->pm_qos_class = pm_qos_class;
++      INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
+       pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
+                            &req->node, PM_QOS_ADD_REQ, value);
+ }
+@@ -279,6 +295,9 @@ void pm_qos_update_request(struct pm_qos_request *req,
+               return;
+       }
++      if (delayed_work_pending(&req->work))
++              cancel_delayed_work_sync(&req->work);
++
+       if (new_value != req->node.prio)
+               pm_qos_update_target(
+                       pm_qos_array[req->pm_qos_class]->constraints,
+@@ -287,6 +306,34 @@ void pm_qos_update_request(struct pm_qos_request *req,
+ EXPORT_SYMBOL_GPL(pm_qos_update_request);
+ /**
++ * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
++ * @req : handle to list element holding a pm_qos request to use
++ * @new_value: defines the temporal qos request
++ * @timeout_us: the effective duration of this qos request in usecs.
++ *
++ * After timeout_us, this qos request is cancelled automatically.
++ */
++void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
++                                 unsigned long timeout_us)
++{
++      if (!req)
++              return;
++      if (WARN(!pm_qos_request_active(req),
++               "%s called for unknown object.", __func__))
++              return;
++
++      if (delayed_work_pending(&req->work))
++              cancel_delayed_work_sync(&req->work);
++
++      if (new_value != req->node.prio)
++              pm_qos_update_target(
++                      pm_qos_array[req->pm_qos_class]->constraints,
++                      &req->node, PM_QOS_UPDATE_REQ, new_value);
++
++      schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
++}
++
++/**
+  * pm_qos_remove_request - modifies an existing qos request
+  * @req: handle to request list element
+  *
+@@ -305,6 +352,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
+               return;
+       }
++      if (delayed_work_pending(&req->work))
++              cancel_delayed_work_sync(&req->work);
++
+       pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
+                            &req->node, PM_QOS_REMOVE_REQ,
+                            PM_QOS_DEFAULT_VALUE);
+-- 
+1.7.10.1.362.g242cab3
+
diff --git a/series b/series
index 9d79672a8f69adbc5e3177bde161ddefbeae9256..eb8186287c141c03483315973fe902148405e02b 100644 (file)
--- a/series
+++ b/series
@@ -356,3 +356,213 @@ patches.lttng/0173-staging-lttng-TODO-update-lttng-reported-to-work-fin.patch
 patches.lttng/0174-staging-lttng-Update-max-symbol-length-to-256.patch
 patches.lttng/lttng-update-to-v2.0.1.patch
 
+patches.runtime_pm/0001-Revert-PM-Runtime-Automatically-retry-failed-autosus.patch
+patches.runtime_pm/0002-PM-Domains-Rename-struct-dev_power_domain-to-struct-.patch
+patches.runtime_pm/0003-PM-subsys_data-in-struct-dev_pm_info-need-not-depend.patch
+patches.runtime_pm/0004-PM-Domains-Support-for-generic-I-O-PM-domains-v8.patch
+patches.runtime_pm/0005-PM-Introduce-generic-noirq-callback-routines-for-sub.patch
+patches.runtime_pm/0006-PM-Domains-Move-code-from-under-ifdef-CONFIG_PM_RUNT.patch
+patches.runtime_pm/0007-PM-Domains-System-wide-transitions-support-for-gener.patch
+patches.runtime_pm/0008-PM-Domains-Wakeup-devices-support-for-system-sleep-t.patch
+patches.runtime_pm/0009-PM-Allow-the-clocks-management-code-to-be-used-durin.patch
+patches.runtime_pm/0010-PM-Rename-clock-management-functions.patch
+patches.runtime_pm/0011-PM-Runtime-Update-documentation-of-interactions-with.patch
+patches.runtime_pm/0012-PM-Runtime-Return-special-error-code-if-runtime-PM-i.patch
+patches.runtime_pm/0013-PM-Limit-race-conditions-between-runtime-PM-and-syst.patch
+patches.runtime_pm/0014-PM-Runtime-Improve-documentation-of-enable-disable-a.patch
+patches.runtime_pm/0015-PM-Runtime-Replace-run-time-with-runtime-in-document.patch
+patches.runtime_pm/0016-PM-Runtime-Prevent-runtime_resume-from-racing-with-p.patch
+patches.runtime_pm/0017-PM-Runtime-Consistent-utilization-of-deferred_resume.patch
+patches.runtime_pm/0018-PM-Domains-Export-pm_genpd_poweron-in-header.patch
+patches.runtime_pm/0019-doc-Konfig-Documentation-power-pm-apm-acpi-.txt.patch
+patches.runtime_pm/0020-PM-Domains-Set-device-state-to-active-during-system-.patch
+patches.runtime_pm/0021-PM-Domains-Make-failing-pm_genpd_prepare-clean-up-pr.patch
+patches.runtime_pm/0022-PM-Domains-Do-not-execute-device-callbacks-under-loc.patch
+patches.runtime_pm/0023-PM-Domains-Allow-callbacks-to-execute-all-runtime-PM.patch
+patches.runtime_pm/0024-PM-Domains-Do-not-restore-all-devices-on-power-off-e.patch
+patches.runtime_pm/0025-PM-Domains-Improve-handling-of-wakeup-devices-during.patch
+patches.runtime_pm/0026-PM-Domains-Queue-up-power-off-work-only-if-it-is-not.patch
+patches.runtime_pm/0027-PM-Runtime-Add-new-helper-function-pm_runtime_status.patch
+patches.runtime_pm/0028-PM-Domains-Introduce-function-to-power-off-all-unuse.patch
+patches.runtime_pm/0029-ARM-shmobile-Use-genpd_queue_power_off_work.patch
+patches.runtime_pm/0030-PM-Domains-Take-.power_off-error-code-into-account.patch
+patches.runtime_pm/0031-PM-OPP-Introduce-function-to-free-cpufreq-table.patch
+patches.runtime_pm/0032-PM-Suspend-Add-.suspend_again-callback-to-suspend_op.patch
+patches.runtime_pm/0033-PM-Suspend-Export-suspend_set_ops-suspend_valid_only.patch
+patches.runtime_pm/0034-PM-Add-RTC-to-PM-trace-time-stamps-to-avoid-confusio.patch
+patches.runtime_pm/0035-PM-Improve-error-code-of-pm_notifier_call_chain.patch
+patches.runtime_pm/0036-drivers-base-power-opp.c-fix-dev_opp-initial-value.patch
+patches.runtime_pm/0037-PM-Domains-Fix-pm_genpd_poweron.patch
+patches.runtime_pm/0038-PM-Runtime-Allow-_put_sync-from-interrupts-disabled-.patch
+patches.runtime_pm/0039-PM-Domains-Fix-build-for-CONFIG_PM_RUNTIME-unset.patch
+patches.runtime_pm/0040-PM-Runtime-Add-might_sleep-to-runtime-PM-functions.patch
+patches.runtime_pm/0041-PM-Runtime-Add-macro-to-test-for-runtime-PM-events.patch
+patches.runtime_pm/0042-PM-Use-spinlock-instead-of-mutex-in-clock-management.patch
+patches.runtime_pm/0043-PM-Runtime-Correct-documentation-of-pm_runtime_irq_s.patch
+patches.runtime_pm/0044-PM-Domains-Implement-subdomain-counters-as-atomic-fi.patch
+patches.runtime_pm/0045-PM-Domains-Do-not-take-parent-locks-to-modify-subdom.patch
+patches.runtime_pm/0046-PM-Domains-Make-pm_genpd_poweron-always-survive-pare.patch
+patches.runtime_pm/0047-PM-Domains-Add-wait-for-parent-status-for-generic-PM.patch
+patches.runtime_pm/0048-PM-Domains-Allow-generic-PM-domains-to-have-multiple.patch
+patches.runtime_pm/0049-PM-Domains-Rename-GPD_STATE_WAIT_PARENT-to-GPD_STATE.patch
+patches.runtime_pm/0050-PM-Domains-Rename-argument-of-pm_genpd_add_subdomain.patch
+patches.runtime_pm/0051-PM-Introduce-struct-pm_subsys_data.patch
+patches.runtime_pm/0052-PM-Reference-counting-of-power.subsys_data.patch
+patches.runtime_pm/0053-PM-Domains-Use-power.sybsys_data-to-reduce-overhead.patch
+patches.runtime_pm/0054-PM-QoS-Move-and-rename-the-implementation-files.patch
+patches.runtime_pm/0055-plist-Remove-the-need-to-supply-locks-to-plist-heads.patch
+patches.runtime_pm/0056-PM-QoS-Minor-clean-ups.patch
+patches.runtime_pm/0057-PM-QoS-Code-reorganization.patch
+patches.runtime_pm/0058-PM-QoS-Reorganize-data-structs.patch
+patches.runtime_pm/0059-PM-QoS-Generalize-and-export-constraints-management-.patch
+patches.runtime_pm/0060-PM-QoS-Implement-per-device-PM-QoS-constraints.patch
+patches.runtime_pm/0061-PM-QoS-Add-global-notification-mechanism-for-device-.patch
+patches.runtime_pm/0062-PM-Domains-Preliminary-support-for-devices-with-powe.patch
+patches.runtime_pm/0063-PM-Runtime-pm_runtime_idle-can-be-called-in-atomic-c.patch
+patches.runtime_pm/0064-cpu_pm-Add-cpu-power-management-notifiers.patch
+patches.runtime_pm/0065-PM-Clocks-Do-not-acquire-a-mutex-under-a-spinlock.patch
+patches.runtime_pm/0066-PM-Domains-Split-device-PM-domain-data-into-base-and.patch
+patches.runtime_pm/0067-doc-fix-broken-references.patch
+patches.runtime_pm/0068-PM-Runtime-Don-t-run-callbacks-under-lock-for-power..patch
+patches.runtime_pm/0069-PM-Runtime-Introduce-trace-points-for-tracing-rpm_-f.patch
+patches.runtime_pm/0070-PM-Tracing-build-rpm-traces.c-only-if-CONFIG_PM_RUNT.patch
+patches.runtime_pm/0071-PM-Runtime-Replace-dev_dbg-with-trace_rpm_.patch
+patches.runtime_pm/0072-PM-OPP-Add-OPP-availability-change-notifier.patch
+patches.runtime_pm/0073-PM-OPP-Fix-build-when-CONFIG_PM_OPP-is-not-set.patch
+patches.runtime_pm/0074-PM-QoS-Add-function-dev_pm_qos_read_value-v3.patch
+patches.runtime_pm/0075-PM-QoS-Update-Documentation-for-the-pm_qos-and-dev_p.patch
+patches.runtime_pm/0076-regulator-Fix-some-bitrot-in-the-machine-driver-docu.patch
+patches.runtime_pm/0077-regulator-Clarify-documentation-for-regulator-regula.patch
+patches.runtime_pm/0078-PM-Runtime-Update-document-about-callbacks.patch
+patches.runtime_pm/0079-PM-Runtime-Fix-kerneldoc-comment-for-rpm_suspend.patch
+patches.runtime_pm/0080-PM-Runtime-Handle-.runtime_suspend-failure-correctly.patch
+patches.runtime_pm/0081-PM-Suspend-Add-statistics-debugfs-file-for-suspend-t.patch
+patches.runtime_pm/0082-PM-Fix-build-issue-in-main.c-for-CONFIG_PM_SLEEP-uns.patch
+patches.runtime_pm/0083-PM-Hibernate-Include-storage-keys-in-hibernation-ima.patch
+patches.runtime_pm/0084-PM-VT-Cleanup-if-defined-uglyness-and-fix-compile-er.patch
+patches.runtime_pm/0085-PM-Update-the-policy-on-default-wakeup-settings.patch
+patches.runtime_pm/0086-PM-Hibernate-Freeze-kernel-threads-after-preallocati.patch
+patches.runtime_pm/0087-PM-Hibernate-Fix-typo-in-a-kerneldoc-comment.patch
+patches.runtime_pm/0088-PM-Hibernate-Add-resumewait-param-to-support-MMC-lik.patch
+patches.runtime_pm/0089-PM-Hibernate-Add-resumedelay-kernel-param-in-additio.patch
+patches.runtime_pm/0090-PM-Hibernate-Do-not-initialize-static-and-extern-var.patch
+patches.runtime_pm/0091-PM-Hibernate-Improve-performance-of-LZO-plain-hibern.patch
+patches.runtime_pm/0092-PM-Sleep-Mark-devices-involved-in-wakeup-signaling-d.patch
+patches.runtime_pm/0093-PM-Documentation-Update-docs-about-suspend-and-CPU-h.patch
+patches.runtime_pm/0094-PM-Clocks-Remove-redundant-NULL-checks-before-kfree.patch
+patches.runtime_pm/0095-kernel-fix-several-implicit-usasges-of-kmod.h.patch
+patches.runtime_pm/0096-kernel-Fix-files-explicitly-needing-EXPORT_SYMBOL-in.patch
+patches.runtime_pm/0097-drivers-base-Add-export.h-for-EXPORT_SYMBOL-THIS_MOD.patch
+patches.runtime_pm/0098-drivers-base-change-module.h-export.h-in-power-commo.patch
+patches.runtime_pm/0099-pm_runtime.h-explicitly-requires-notifier.h.patch
+patches.runtime_pm/0100-PM-Sleep-Update-freezer-documentation.patch
+patches.runtime_pm/0101-PM-Runtime-Fix-runtime-accounting-calculation-error.patch
+patches.runtime_pm/0102-PM-QoS-Remove-redundant-check.patch
+patches.runtime_pm/0103-PM-Runtime-Automatically-retry-failed-autosuspends.patch
+patches.runtime_pm/0104-PM-QoS-Set-cpu_dma_pm_qos-name.patch
+patches.runtime_pm/0105-PM-OPP-Use-ERR_CAST-instead-of-ERR_PTR-PTR_ERR.patch
+patches.runtime_pm/0106-PM-Clocks-Only-disable-enabled-clocks-in-pm_clk_susp.patch
+patches.runtime_pm/0107-PM-QoS-Properly-use-the-WARN-macro-in-dev_pm_qos_add.patch
+patches.runtime_pm/0108-PM-Sleep-Do-not-extend-wakeup-paths-to-devices-with-.patch
+patches.runtime_pm/0109-PM-Hibernate-Fix-the-early-termination-of-test-modes.patch
+patches.runtime_pm/0110-PM-Suspend-Fix-bug-in-suspend-statistics-update.patch
+patches.runtime_pm/0111-freezer-don-t-unnecessarily-set-PF_NOFREEZE-explicit.patch
+patches.runtime_pm/0112-freezer-fix-current-state-restoration-race-in-refrig.patch
+patches.runtime_pm/0113-freezer-unexport-refrigerator-and-update-try_to_free.patch
+patches.runtime_pm/0114-oom-thaw-threads-if-oom-killed-thread-is-frozen-befo.patch
+patches.runtime_pm/0115-freezer-implement-and-use-kthread_freezable_should_s.patch
+patches.runtime_pm/0116-freezer-rename-thaw_process-to-__thaw_task-and-simpl.patch
+patches.runtime_pm/0117-freezer-remove-racy-clear_freeze_flag-and-set-PF_NOF.patch
+patches.runtime_pm/0118-freezer-don-t-distinguish-nosig-tasks-on-thaw.patch
+patches.runtime_pm/0119-freezer-use-dedicated-lock-instead-of-task_lock-memo.patch
+patches.runtime_pm/0120-freezer-make-freezing-indicate-freeze-condition-in-e.patch
+patches.runtime_pm/0121-freezer-test-freezable-conditions-while-holding-free.patch
+patches.runtime_pm/0122-freezer-clean-up-freeze_processes-failure-path.patch
+patches.runtime_pm/0123-cgroup_freezer-prepare-for-removal-of-TIF_FREEZE.patch
+patches.runtime_pm/0124-freezer-make-freezing-test-freeze-conditions-in-effe.patch
+patches.runtime_pm/0125-Freezer-fix-more-fallout-from-the-thaw_process-renam.patch
+patches.runtime_pm/0126-freezer-remove-unused-sig_only-from-freeze_task.patch
+patches.runtime_pm/0127-PM-Hibernate-Do-not-leak-memory-in-error-test-code-p.patch
+patches.runtime_pm/0128-PM-Fix-indentation-and-remove-extraneous-whitespaces.patch
+patches.runtime_pm/0129-PM-Sleep-Remove-unnecessary-label-and-jumps-to-it-fo.patch
+patches.runtime_pm/0130-PM-Sleep-Simplify-device_suspend_noirq.patch
+patches.runtime_pm/0131-PM-Hibernate-Refactor-and-simplify-hibernation_snaps.patch
+patches.runtime_pm/0132-PM-Domains-Document-how-PM-domains-are-used-by-the-P.patch
+patches.runtime_pm/0133-PM-Sleep-Correct-inaccurate-information-in-devices.t.patch
+patches.runtime_pm/0134-PM-Runtime-Make-documentation-follow-the-new-behavio.patch
+patches.runtime_pm/0135-PM-Sleep-Update-documentation-related-to-system-wake.patch
+patches.runtime_pm/0136-PM-Update-comments-describing-device-power-managemen.patch
+patches.runtime_pm/0137-PM-Runtime-Use-device-PM-QoS-constraints-v2.patch
+patches.runtime_pm/0138-PM-Domains-Make-it-possible-to-use-per-device-domain.patch
+patches.runtime_pm/0139-PM-Domains-Introduce-save-restore-state-device-callb.patch
+patches.runtime_pm/0140-PM-Domains-Rework-system-suspend-callback-routines-v.patch
+patches.runtime_pm/0141-PM-Domains-Add-device-stop-governor-function-v4.patch
+patches.runtime_pm/0142-PM-Domains-Add-default-power-off-governor-function-v.patch
+patches.runtime_pm/0143-PM-Domains-Automatically-update-overoptimistic-laten.patch
+patches.runtime_pm/0144-PM-Domains-fix-compilation-failure-for-CONFIG_PM_GEN.patch
+patches.runtime_pm/0145-regulator-Fix-regulator_register-API-signature-in-Do.patch
+patches.runtime_pm/0146-PM-Hibernate-Enable-usermodehelpers-in-software_resu.patch
+patches.runtime_pm/0147-PM-Hibernate-Thaw-processes-in-SNAPSHOT_CREATE_IMAGE.patch
+patches.runtime_pm/0148-PM-Hibernate-Remove-deprecated-hibernation-test-mode.patch
+patches.runtime_pm/0149-PM-Sleep-Unify-diagnostic-messages-from-device-suspe.patch
+patches.runtime_pm/0150-PM-Hibernate-Replace-unintuitive-if-condition-in-ker.patch
+patches.runtime_pm/0151-PM-Domains-Make-it-possible-to-assign-names-to-gener.patch
+patches.runtime_pm/0152-PM-Domains-Fix-default-system-suspend-resume-operati.patch
+patches.runtime_pm/0153-PM-Sleep-Replace-mutex_-un-lock-pm_mutex-with-un-loc.patch
+patches.runtime_pm/0154-PM-Sleep-Recommend-un-lock_system_sleep-over-using-p.patch
+patches.runtime_pm/0155-PM-Domains-Provide-an-always-on-power-domain-governo.patch
+patches.runtime_pm/0156-PM-Hibernate-Remove-deprecated-hibernation-snapshot-.patch
+patches.runtime_pm/0157-PM-Sleep-Simplify-generic-system-suspend-callbacks.patch
+patches.runtime_pm/0158-PM-Sleep-Merge-internal-functions-in-generic_ops.c.patch
+patches.runtime_pm/0159-PM-Sleep-Make-pm_op-and-pm_noirq_op-return-callback-.patch
+patches.runtime_pm/0160-PM-Run-the-driver-callback-directly-if-the-subsystem.patch
+patches.runtime_pm/0161-PM-Drop-generic_subsys_pm_ops.patch
+patches.runtime_pm/0162-PM-QoS-Introduce-dev_pm_qos_add_ancestor_request.patch
+patches.runtime_pm/0163-power_supply-Add-initial-Charger-Manager-driver.patch
+patches.runtime_pm/0164-PM-Hibernate-Implement-compat_ioctl-for-dev-snapshot.patch
+patches.runtime_pm/0165-mm-more-intensive-memory-corruption-debugging.patch
+patches.runtime_pm/0166-PM-Hibernate-do-not-count-debug-pages-as-savable.patch
+patches.runtime_pm/0167-power_supply-Charger-Manager-Add-properties-for-powe.patch
+patches.runtime_pm/0168-PM-Domains-Fix-build-for-CONFIG_PM_SLEEP-unset.patch
+patches.runtime_pm/0169-PM-Domains-Skip-governor-functions-for-CONFIG_PM_RUN.patch
+patches.runtime_pm/0170-PM-Documentation-Fix-spelling-mistake-in-basic-pm-de.patch
+patches.runtime_pm/0171-PM-Documentation-Fix-minor-issue-in-freezing_of_task.patch
+patches.runtime_pm/0172-PM-Hibernate-Correct-additional-pages-number-calcula.patch
+patches.runtime_pm/0173-PM-Domains-Add-OF-support.patch
+patches.runtime_pm/0174-PM-Hibernate-Fix-s2disk-regression-related-to-freezi.patch
+patches.runtime_pm/0175-PM-Sleep-Introduce-late-suspend-and-early-resume-of-.patch
+patches.runtime_pm/0176-PM-Sleep-Introduce-generic-callbacks-for-new-device-.patch
+patches.runtime_pm/0177-PM-Domains-Run-late-early-device-suspend-callbacks-a.patch
+patches.runtime_pm/0178-PM-QoS-Simplify-PM-QoS-expansion-merge.patch
+patches.runtime_pm/0179-PM-Hibernate-Thaw-kernel-threads-in-SNAPSHOT_CREATE_.patch
+patches.runtime_pm/0180-PM-Freezer-Thaw-only-kernel-threads-if-freezing-of-k.patch
+patches.runtime_pm/0181-PM-QoS-CPU-C-state-breakage-with-PM-Qos-change.patch
+patches.runtime_pm/0182-PM-Suspend-Avoid-code-duplication-in-suspend-statist.patch
+patches.runtime_pm/0183-PM-Freezer-Docs-Document-the-beauty-of-freeze-thaw-s.patch
+patches.runtime_pm/0184-PM-Hibernate-Thaw-kernel-threads-in-hibernation_snap.patch
+patches.runtime_pm/0185-PM-Hibernate-Refactor-and-simplify-freezer_test_done.patch
+patches.runtime_pm/0186-PM-Domains-Provide-a-dummy-dev_gpd_data-when-generic.patch
+patches.runtime_pm/0187-PM-Make-sysrq-o-be-available-for-CONFIG_PM-unset.patch
+patches.runtime_pm/0188-PM-QoS-unconditionally-build-the-feature.patch
+patches.runtime_pm/0189-PM-Sleep-Initialize-wakeup-source-locks-in-wakeup_so.patch
+patches.runtime_pm/0190-PM-Sleep-Do-not-check-wakeup-too-often-in-try_to_fre.patch
+patches.runtime_pm/0191-PM-Sleep-Remove-unnecessary-label-from-suspend_freez.patch
+patches.runtime_pm/0192-PM-Sleep-Unify-kerneldoc-comments-in-kernel-power-su.patch
+patches.runtime_pm/0193-PM-Sleep-Make-enter_state-in-kernel-power-suspend.c-.patch
+patches.runtime_pm/0194-PM-Sleep-Drop-suspend_stats_update.patch
+patches.runtime_pm/0195-PM-Add-comment-describing-relationships-between-PM-c.patch
+patches.runtime_pm/0196-PM-Hibernate-print-physical-addresses-consistently-w.patch
+patches.runtime_pm/0197-PM-Sleep-Fix-possible-infinite-loop-during-wakeup-so.patch
+patches.runtime_pm/0198-PM-Sleep-Fix-race-conditions-related-to-wakeup-sourc.patch
+patches.runtime_pm/0199-PM-Sleep-Add-more-wakeup-source-initialization-routi.patch
+patches.runtime_pm/0200-PM-Freezer-Remove-references-to-TIF_FREEZE-in-commen.patch
+patches.runtime_pm/0201-PM-Domains-Fix-include-for-PM_GENERIC_DOMAINS-n-case.patch
+patches.runtime_pm/0202-PM-QoS-Make-it-possible-to-expose-PM-QoS-latency-con.patch
+patches.runtime_pm/0203-PM-Domains-Fix-handling-of-wakeup-devices-during-sys.patch
+patches.runtime_pm/0204-PM-Domains-Fix-hibernation-restore-of-devices-v2.patch
+patches.runtime_pm/0205-PM-Domains-Introduce-always-on-device-flag.patch
+patches.runtime_pm/0206-PM-Domains-Check-domain-status-during-hibernation-re.patch
+patches.runtime_pm/0207-PM-Runtime-don-t-forget-to-wake-up-waitqueue-on-fail.patch
+patches.runtime_pm/0208-PM-Hibernate-Disable-usermode-helpers-right-before-f.patch
+patches.runtime_pm/0209-PM-Sleep-Move-disabling-of-usermode-helpers-to-the-f.patch
+patches.runtime_pm/0210-PM-QoS-add-pm_qos_update_request_timeout-API.patch