// SPDX-License-Identifier: GPL-2.0-only
/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#define pr_fmt(fmt) "ACPI: PM: " fmt
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"
/*
 * Some HW-full platforms do not have _S5, so they may need
 * to leverage EFI power off for a shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];
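/*
 * Evaluate the _TTS (Transition To State) control method, if the platform
 * provides one, to tell the firmware which sleep state the system is about
 * to enter.
 */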
static void acpi_sleep_tts_switch(u32 acpi_state)
{
        acpi_status status;

        status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                /*
                 * OS can't evaluate the _TTS object correctly. Some warning
                 * message will be printed. But it won't break anything.
                 */
                pr_notice("Failure in evaluating _TTS object\n");
        }
}
static int tts_notify_reboot(struct notifier_block *this,
                             unsigned long code, void *x)
{
        acpi_sleep_tts_switch(ACPI_STATE_S5);
        return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
        .notifier_call = tts_notify_reboot,
};
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address()  false
#endif
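/*
 * acpi_sleep_prepare - Get the platform ready to enter @acpi_state: set the
 * firmware waking vector for S3, enable the wakeup devices and let ACPICA
 * carry out the pre-sleep preparations (including _PTS evaluation).
 */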
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
        unsigned long acpi_wakeup_address;

        /* do we have a wakeup address for S2 and S3? */
        if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
                acpi_wakeup_address = acpi_get_wakeup_address();
                if (!acpi_wakeup_address)
                        return -EFAULT;
                acpi_set_waking_vector(acpi_wakeup_address);
        }
#endif
        pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
        acpi_enable_wakeup_devices(acpi_state);
        acpi_enter_sleep_state_prep(acpi_state);
        return 0;
}
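/*
 * acpi_sleep_state_supported - Report whether the platform supports @sleep_state.
 *
 * On hardware-reduced ACPI platforms the FADT must also provide the sleep
 * control and sleep status registers for the state to be usable.
 */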
bool acpi_sleep_state_supported(u8 sleep_state)
{
        acpi_status status;
        u8 type_a, type_b;

        status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
        return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
                || (acpi_gbl_FADT.sleep_control.address
                        && acpi_gbl_FADT.sleep_status.address));
}
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
        return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

/* Set when the system was woken up by the fixed power button. */
static bool pwr_btn_event_pending;
/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume.  Windows does that also for
 * suspend to RAM.  However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
        nvs_nosave = true;
}
/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3.  Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time.  To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
        nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
        nvs_nosave_s3 = false;
        return 0;
}
/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
        old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
        acpi_old_suspend_ordering();
        return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
        acpi_nvs_nosave();
        return 0;
}

bool acpi_sleep_default_s3;

static int __init init_default_s3(const struct dmi_system_id *d)
{
        acpi_sleep_default_s3 = true;
        return 0;
}
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
        {
        .callback = init_old_suspend_ordering,
        .ident = "Abit KN9 (nForce4 variant)",
        .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
                DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
                },
        },
        {
        .callback = init_old_suspend_ordering,
        .ident = "HP xw4600 Workstation",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
                DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
                },
        },
        {
        .callback = init_old_suspend_ordering,
        .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
        .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
                DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
                },
        },
        {
        .callback = init_old_suspend_ordering,
        .ident = "Panasonic CF51-2L",
        .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR,
                          "Matsushita Electric Industrial Co.,Ltd."),
                DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-FW41E_H",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-FW21E",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-FW21M",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VPCEB17FX",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-SR11M",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Everex StepNote Series",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
                DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VPCEB1Z1E",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-NW130D",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VPCCW29FX",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Averatec AV1020-ED2",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
                DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
                },
        },
        {
        .callback = init_old_suspend_ordering,
        .ident = "Asus A8N-SLI DELUXE",
        .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
                },
        },
        {
        .callback = init_old_suspend_ordering,
        .ident = "Asus A8N-SLI Premium",
        .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-SR26GN_P",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VPCEB1S1E",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Sony Vaio VGN-FW520F",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Asus K54C",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
                DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
                },
        },
        {
        .callback = init_nvs_nosave,
        .ident = "Asus K54HR",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
                DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
                },
        },
        {
        .callback = init_nvs_save_s3,
        .ident = "Asus 1025C",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
                },
        },
        /*
         * https://bugzilla.kernel.org/show_bug.cgi?id=189431
         * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
         * restore.
         */
        {
        .callback = init_nvs_save_s3,
        .ident = "Lenovo G50-45",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
                },
        },
        {
        .callback = init_nvs_save_s3,
        .ident = "Lenovo G40-45",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
                },
        },
        /*
         * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
         * the Low Power S0 Idle firmware interface (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
         */
        {
        .callback = init_default_s3,
        .ident = "ThinkPad X1 Tablet(2016)",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
                },
        },
        /*
         * ASUS B1400CEAE hangs on resume from suspend (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
         */
        {
        .callback = init_default_s3,
        .ident = "ASUS B1400CEAE",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
                },
        },
        {},
};
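/*
 * acpi_sleep_no_blacklist() lets platform code opt out of the DMI quirks
 * above and of the post-2012 "don't save NVS over S3" default applied in
 * acpi_sleep_dmi_check().
 */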
static bool ignore_blacklist;

void __init acpi_sleep_no_blacklist(void)
{
        ignore_blacklist = true;
}

static void __init acpi_sleep_dmi_check(void)
{
        if (ignore_blacklist)
                return;

        /* Systems with a 2012 or later BIOS default to not saving NVS on S3. */
        if (dmi_get_bios_year() >= 2012)
                acpi_nvs_nosave_s3();

        dmi_check_system(acpisleep_dmi_table);
}
/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
        acpi_ec_block_transactions();
        return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
        acpi_pm_freeze();
        return suspend_nvs_save();
}
/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
        int error = acpi_sleep_prepare(acpi_target_sleep_state);

        if (error)
                acpi_target_sleep_state = ACPI_STATE_S0;

        return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *	state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
        int error = __acpi_pm_prepare();

        if (!error)
                error = acpi_pm_pre_suspend();

        return error;
}
/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
        struct acpi_device *pwr_btn_adev;
        u32 acpi_state = acpi_target_sleep_state;

        acpi_ec_unblock_transactions();
        suspend_nvs_free();

        if (acpi_state == ACPI_STATE_S0)
                return;

        pr_info("Waking up from system sleep state S%d\n", acpi_state);
        acpi_disable_wakeup_devices(acpi_state);
        acpi_leave_sleep_state(acpi_state);

        /* reset firmware waking vector */
        acpi_set_waking_vector(0);

        acpi_target_sleep_state = ACPI_STATE_S0;

        acpi_resume_power_resources();

        /* If we were woken with the fixed power button, provide a small
         * hint to userspace in the form of a wakeup event on the fixed power
         * button device (if it can be found).
         *
         * We delay the event generation till now, as the PM layer requires
         * timekeeping to be running before we generate events.
         */
        if (!pwr_btn_event_pending)
                return;

        pwr_btn_event_pending = false;
        pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
                                                    NULL, -1);
        if (pwr_btn_adev) {
                pm_wakeup_event(&pwr_btn_adev->dev, 0);
                acpi_dev_put(pwr_btn_adev);
        }
}
/**
 * acpi_pm_start - Start system PM transition.
 */
static void acpi_pm_start(u32 acpi_state)
{
        acpi_target_sleep_state = acpi_state;
        acpi_sleep_tts_switch(acpi_target_sleep_state);
        acpi_scan_lock_acquire();
}
/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
        acpi_turn_off_unused_power_resources();
        acpi_scan_lock_release();
        /*
         * This is necessary in case acpi_pm_finish() is not called during a
         * failing transition to a sleep state.
         */
        acpi_target_sleep_state = ACPI_STATE_S0;
        acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0	(1)
#define acpi_target_sleep_state	ACPI_STATE_S0
#define acpi_sleep_default_s3	(1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
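/* Map each handled suspend_state_t onto the ACPI S-state used to enter it. */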
static u32 acpi_suspend_states[] = {
        [PM_SUSPEND_ON] = ACPI_STATE_S0,
        [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
        [PM_SUSPEND_MEM] = ACPI_STATE_S3,
        [PM_SUSPEND_MAX] = ACPI_STATE_S5
};
/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *	associated with given @pm_state, if supported.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
        u32 acpi_state = acpi_suspend_states[pm_state];
        int error;

        error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
        if (error)
                return error;

        if (!sleep_states[acpi_state]) {
                pr_err("ACPI does not support sleep state S%u\n", acpi_state);
                return -ENOSYS;
        }

        if (acpi_state > ACPI_STATE_S1)
                pm_set_suspend_via_firmware();

        acpi_pm_start(acpi_state);
        return 0;
}
/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
        acpi_status status = AE_OK;
        u32 acpi_state = acpi_target_sleep_state;
        int error;

        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
        switch (acpi_state) {
        case ACPI_STATE_S1:
                barrier();
                status = acpi_enter_sleep_state(acpi_state);
                break;

        case ACPI_STATE_S3:
                if (!acpi_suspend_lowlevel)
                        return -ENOSYS;
                error = acpi_suspend_lowlevel();
                if (error)
                        return error;
                pr_info("Low-level resume complete\n");
                pm_set_resume_via_firmware();
                break;
        }
        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

        /* This violates the spec but is required for bug compatibility. */
        acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(acpi_state);

        /* ACPI 3.0 specs (P62) says that it's the responsibility
         * of the OSPM to clear the status bit [ implying that the
         * POWER_BUTTON event should not reach userspace ]
         *
         * However, we do generate a small hint for userspace in the form of
         * a wakeup event. We flag this condition for now and generate the
         * event later, as we're currently too early in resume to be able to
         * generate wakeup events.
         */
        if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
                acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

                acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

                if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
                        acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
                        /* Flag for later */
                        pwr_btn_event_pending = true;
                }
        }

        /*
         * Disable all GPE and clear their status bits before interrupts are
         * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
         * prevent them from producing spurious interrupts.
         *
         * acpi_leave_sleep_state() will reenable specific GPEs later.
         *
         * Because this code runs on one CPU with disabled interrupts (all of
         * the other CPUs are offline at this time), it need not acquire any
         * sleeping locks which may trigger an implicit preemption point even
         * if there is no contention, so avoid doing that by using a low-level
         * library routine here.
         */
        acpi_hw_disable_all_gpes();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();

        suspend_nvs_restore();

        return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
        u32 acpi_state;

        switch (pm_state) {
        case PM_SUSPEND_ON:
        case PM_SUSPEND_STANDBY:
        case PM_SUSPEND_MEM:
                acpi_state = acpi_suspend_states[pm_state];

                return sleep_states[acpi_state];
        default:
                return 0;
        }
}

static const struct platform_suspend_ops acpi_suspend_ops = {
        .valid = acpi_suspend_state_valid,
        .begin = acpi_suspend_begin,
        .prepare_late = acpi_pm_prepare,
        .enter = acpi_suspend_enter,
        .wake = acpi_pm_finish,
        .end = acpi_pm_end,
};
/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *	state associated with given @pm_state, if supported, and
 *	execute the _PTS control method.  This function is used if the
 *	pre-ACPI 2.0 suspend ordering has been requested.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
        int error = acpi_suspend_begin(pm_state);

        if (!error)
                error = __acpi_pm_prepare();

        return error;
}
/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
        .valid = acpi_suspend_state_valid,
        .begin = acpi_suspend_begin_old,
        .prepare_late = acpi_pm_pre_suspend,
        .enter = acpi_suspend_enter,
        .wake = acpi_pm_finish,
        .end = acpi_pm_end,
        .recover = acpi_pm_finish,
};
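/*
 * Suspend-to-idle ("s2idle") callbacks.
 *
 * In this mode the system stays in the working state (S0), so no firmware
 * sleep state is entered; the callbacks below only reconfigure wakeup devices
 * and GPEs and decide whether SCI-based wakeups are genuine.
 */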
static bool s2idle_wakeup;

int acpi_s2idle_begin(void)
{
        acpi_scan_lock_acquire();
        return 0;
}

int acpi_s2idle_prepare(void)
{
        if (acpi_sci_irq_valid()) {
                int error;

                error = enable_irq_wake(acpi_sci_irq);
                if (error)
                        pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
                                acpi_sci_irq, error);

                /* Let the EC GPE wake up the system from suspend-to-idle. */
                acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
        }

        acpi_enable_wakeup_devices(ACPI_STATE_S0);

        /* Change the configuration of GPEs to avoid spurious wakeup. */
        acpi_enable_all_wakeup_gpes();
        acpi_os_wait_events_complete();

        s2idle_wakeup = true;
        return 0;
}
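/*
 * acpi_s2idle_wake - Check whether a pending SCI-based wakeup is genuine.
 *
 * Return true if the system should resume, or false if every pending event
 * turned out to be spurious, in which case the SCI is rearmed and
 * suspend-to-idle continues.
 */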
bool acpi_s2idle_wake(void)
{
        if (!acpi_sci_irq_valid())
                return pm_wakeup_pending();

        while (pm_wakeup_pending()) {
                /*
                 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
                 * SCI has not triggered while suspended, so bail out (the
                 * wakeup is pending anyway and the SCI is not the source of
                 * it).
                 */
                if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
                        pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
                        return true;
                }

                /*
                 * If the status bit of any enabled fixed event is set, the
                 * wakeup is regarded as valid.
                 */
                if (acpi_any_fixed_event_status_set()) {
                        pm_pr_dbg("ACPI fixed event wakeup\n");
                        return true;
                }

                /* Check wakeups from drivers sharing the SCI. */
                if (acpi_check_wakeup_handlers()) {
                        pm_pr_dbg("ACPI custom handler wakeup\n");
                        return true;
                }

                /*
                 * Check non-EC GPE wakeups and if there are none, cancel the
                 * SCI-related wakeup and dispatch the EC GPE.
                 */
                if (acpi_ec_dispatch_gpe()) {
                        pm_pr_dbg("ACPI non-EC GPE wakeup\n");
                        return true;
                }

                acpi_os_wait_events_complete();

                /*
                 * The SCI is in the "suspended" state now and it cannot produce
                 * new wakeup events till the rearming below, so if any of them
                 * are pending here, they must be resulting from the processing
                 * of EC events above or coming from somewhere else.
                 */
                if (pm_wakeup_pending()) {
                        pm_pr_dbg("Wakeup after ACPI Notify sync\n");
                        return true;
                }

                pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

                pm_wakeup_clear(acpi_sci_irq);
                rearm_wake_irq(acpi_sci_irq);
        }

        return false;
}
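/*
 * acpi_s2idle_restore - Undo the wakeup configuration set up by
 * acpi_s2idle_prepare() once the last suspend-to-idle cycle is over.
 */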
void acpi_s2idle_restore(void)
{
        /*
         * Drain pending events before restoring the working-state configuration
         * of GPEs.
         */
        acpi_os_wait_events_complete(); /* synchronize GPE processing */
        acpi_ec_flush_work(); /* flush the EC driver's workqueues */
        acpi_os_wait_events_complete(); /* synchronize Notify handling */

        s2idle_wakeup = false;

        acpi_enable_all_runtime_gpes();

        acpi_disable_wakeup_devices(ACPI_STATE_S0);

        if (acpi_sci_irq_valid()) {
                acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
                disable_irq_wake(acpi_sci_irq);
        }
}

void acpi_s2idle_end(void)
{
        acpi_scan_lock_release();
}
static const struct platform_s2idle_ops acpi_s2idle_ops = {
        .begin = acpi_s2idle_begin,
        .prepare = acpi_s2idle_prepare,
        .wake = acpi_s2idle_wake,
        .restore = acpi_s2idle_restore,
        .end = acpi_s2idle_end,
};
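/*
 * acpi_s2idle_setup() is __weak so that platform code (for example the x86
 * Low Power S0 Idle support) can replace it with an extended implementation.
 */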
void __weak acpi_s2idle_setup(void)
{
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
                pr_info("Efficient low-power S0 idle declared\n");

        s2idle_set_ops(&acpi_s2idle_ops);
}
static void __init acpi_sleep_suspend_setup(void)
{
        bool suspend_ops_needed = false;
        int i;

        for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
                if (acpi_sleep_state_supported(i)) {
                        sleep_states[i] = 1;
                        suspend_ops_needed = true;
                }

        if (suspend_ops_needed)
                suspend_set_ops(old_suspend_ordering ?
                                &acpi_suspend_ops_old : &acpi_suspend_ops);

        acpi_s2idle_setup();
}

#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup	(false)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */
bool acpi_s2idle_wakeup(void)
{
        return s2idle_wakeup;
}
#ifdef CONFIG_PM_SLEEP
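/*
 * BM_RLD controls whether bus-master requests can bring the CPU out of C3.
 * Its value is saved before suspend and, if the firmware changed it, written
 * back on resume via syscore ops.
 */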
static u32 saved_bm_rld;

static int acpi_save_bm_rld(void)
{
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
        return 0;
}

static void acpi_restore_bm_rld(void)
{
        u32 resumed_bm_rld = 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
        if (resumed_bm_rld == saved_bm_rld)
                return;

        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_sleep_syscore_ops = {
        .suspend = acpi_save_bm_rld,
        .resume = acpi_restore_bm_rld,
};

static void acpi_sleep_syscore_init(void)
{
        register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_HIBERNATION
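/*
 * Hibernation (S4) support.  acpi_check_s4_hw_signature selects how the FACS
 * hardware signature is used: the default (-1) only warns about a mismatch
 * after resume, 0 disables the check, and a positive value stores the
 * signature in the swsusp image header so the boot kernel can decline to
 * resume on changed hardware.
 */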
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */

static int acpi_hibernation_begin(pm_message_t stage)
{
        if (!nvs_nosave) {
                int error = suspend_nvs_alloc();

                if (error)
                        return error;
        }

        if (stage.event == PM_EVENT_HIBERNATE)
                pm_set_suspend_via_firmware();

        acpi_pm_start(ACPI_STATE_S4);
        return 0;
}
static int acpi_hibernation_enter(void)
{
        acpi_status status = AE_OK;

        /* This shouldn't return.  If it returns, we have a problem */
        status = acpi_enter_sleep_state(ACPI_STATE_S4);
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);

        return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_hibernation_leave(void)
{
        pm_set_resume_via_firmware();
        /*
         * If ACPI is not enabled by the BIOS and the boot kernel, we need to
         * enable it here.
         */
        acpi_enable();
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);
        /* Check the hardware signature */
        if (facs && s4_hardware_signature != facs->hardware_signature)
                pr_crit("Hardware changed while hibernated, success doubtful!\n");
        /* Restore the NVS memory area */
        suspend_nvs_restore();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
        acpi_ec_unblock_transactions();
        acpi_enable_all_runtime_gpes();
}
static const struct platform_hibernation_ops acpi_hibernation_ops = {
        .begin = acpi_hibernation_begin,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_prepare,
        .finish = acpi_pm_finish,
        .prepare = acpi_pm_prepare,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
};
/**
 * acpi_hibernation_begin_old - Set the target system sleep state to
 *	ACPI_STATE_S4 and execute the _PTS control method.  This
 *	function is used if the pre-ACPI 2.0 suspend ordering has been
 *	requested.
 */
static int acpi_hibernation_begin_old(pm_message_t stage)
{
        int error;
        /*
         * The _TTS object should always be evaluated before the _PTS object.
         * When old_suspend_ordering is true, the _PTS object is evaluated in
         * acpi_sleep_prepare().
         */
        acpi_sleep_tts_switch(ACPI_STATE_S4);

        error = acpi_sleep_prepare(ACPI_STATE_S4);
        if (error)
                return error;

        if (!nvs_nosave) {
                error = suspend_nvs_alloc();
                if (error)
                        return error;
        }

        if (stage.event == PM_EVENT_HIBERNATE)
                pm_set_suspend_via_firmware();

        acpi_target_sleep_state = ACPI_STATE_S4;
        acpi_scan_lock_acquire();
        return 0;
}
/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
        .begin = acpi_hibernation_begin_old,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_pre_suspend,
        .prepare = acpi_pm_freeze,
        .finish = acpi_pm_finish,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
        .recover = acpi_pm_finish,
};
static void acpi_sleep_hibernate_setup(void)
{
        if (!acpi_sleep_state_supported(ACPI_STATE_S4))
                return;

        hibernation_set_ops(old_suspend_ordering ?
                        &acpi_hibernation_ops_old : &acpi_hibernation_ops);
        sleep_states[ACPI_STATE_S4] = 1;
        if (!acpi_check_s4_hw_signature)
                return;

        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
        if (facs) {
                /*
                 * s4_hardware_signature is the local variable which is just
                 * used to warn about a mismatch after we're attempting to
                 * resume (in violation of the ACPI specification.)
                 */
                s4_hardware_signature = facs->hardware_signature;

                if (acpi_check_s4_hw_signature > 0) {
                        /*
                         * If we're actually obeying the ACPI specification
                         * then the signature is written out as part of the
                         * swsusp header, in order to allow the boot kernel
                         * to gracefully decline to resume.
                         */
                        swsusp_hardware_signature = facs->hardware_signature;
                }
        }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */
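/*
 * S5 (soft off) handlers registered with the sys-off infrastructure: the
 * "prepare" step runs the firmware's pre-sleep methods and quiesces GPEs
 * while interrupts are still enabled, and the final handler enters S5 with
 * interrupts disabled.
 */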
static int acpi_power_off_prepare(struct sys_off_data *data)
{
        /* Prepare to power off the system */
        acpi_sleep_prepare(ACPI_STATE_S5);
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();

        return NOTIFY_DONE;
}

static int acpi_power_off(struct sys_off_data *data)
{
        /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
        pr_debug("%s called\n", __func__);
        local_irq_disable();
        acpi_enter_sleep_state(ACPI_STATE_S5);

        return NOTIFY_DONE;
}
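/*
 * acpi_sleep_init - Discover which sleep states the platform supports and
 * register the corresponding suspend, hibernation and power-off handlers.
 */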
int __init acpi_sleep_init(void)
{
        char supported[ACPI_S_STATE_COUNT * 3 + 1];
        char *pos = supported;
        int i;

        acpi_sleep_dmi_check();

        sleep_states[ACPI_STATE_S0] = 1;

        acpi_sleep_syscore_init();
        acpi_sleep_suspend_setup();
        acpi_sleep_hibernate_setup();

        if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
                sleep_states[ACPI_STATE_S5] = 1;

                register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
                                         SYS_OFF_PRIO_FIRMWARE,
                                         acpi_power_off_prepare, NULL);

                register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                         SYS_OFF_PRIO_FIRMWARE,
                                         acpi_power_off, NULL);

                /*
                 * Windows uses S5 for reboot, so some BIOSes depend on it to
                 * perform proper reboot.
                 */
                register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
                                         SYS_OFF_PRIO_FIRMWARE,
                                         acpi_power_off_prepare, NULL);
        } else {
                acpi_no_s5 = true;
        }

        supported[0] = 0;

        for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
                if (sleep_states[i])
                        pos += sprintf(pos, " S%d", i);
        }
        pr_info("(supports%s)\n", supported);

        /*
         * Register the tts_notifier to reboot notifier list so that the _TTS
         * object can also be evaluated when the system enters S5.
         */
        register_reboot_notifier(&tts_notifier);
        return 0;
}