/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
31 const char *const pm_states[PM_SUSPEND_MAX] = {
32 #ifdef CONFIG_EARLYSUSPEND
33 [PM_SUSPEND_ON] = "on",
34 #elif defined(CONFIG_PARTIALSUSPEND_SLP)
35 [PM_SUSPEND_ON] = "post_resume",
37 [PM_SUSPEND_STANDBY] = "standby",
38 [PM_SUSPEND_MEM] = "mem",
39 #ifdef CONFIG_PARTIALSUSPEND_SLP
40 [PM_SUSPEND_PRE] = "pre_suspend"
44 static const struct platform_suspend_ops *suspend_ops;
47 * suspend_set_ops - Set the global suspend method table.
48 * @ops: Pointer to ops structure.
50 void suspend_set_ops(const struct platform_suspend_ops *ops)
52 mutex_lock(&pm_mutex);
54 mutex_unlock(&pm_mutex);
57 bool valid_state(suspend_state_t state)
60 * All states need lowlevel support and need to be valid to the lowlevel
61 * implementation, no valid callback implies that none are valid.
63 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
67 * suspend_valid_only_mem - generic memory-only valid callback
69 * Platform drivers that implement mem suspend only and only need
70 * to check for that in their .valid callback can use this instead
71 * of rolling their own .valid callback.
73 int suspend_valid_only_mem(suspend_state_t state)
75 return state == PM_SUSPEND_MEM;
#ifdef CONFIG_PARTIALSUSPEND_SLP
/*
 * Vendor .valid callback for SLP partial suspend: in addition to "mem" it
 * accepts the PM_SUSPEND_PRE and PM_SUSPEND_ON pseudo states.
 */
int suspend_valid_partialsuspend(suspend_state_t state)
{
	return ((state == PM_SUSPEND_MEM) || (state == PM_SUSPEND_PRE)
		|| (state == PM_SUSPEND_ON));
}
#endif /* CONFIG_PARTIALSUSPEND_SLP */
/*
 * suspend_test - Pause at a test checkpoint of the suspend sequence.
 * @level: TEST_* checkpoint identifier.
 *
 * When CONFIG_PM_DEBUG is set and the user selected this test level via
 * /sys/power/pm_test, wait five seconds and return nonzero so the caller
 * aborts the suspend at this point.  Returns 0 otherwise.
 */
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}
99 * suspend_prepare - Do prep work before entering low-power state.
101 * This is common code that is called for each state that we're entering.
102 * Run suspend notifiers, allocate a console and stop all processes.
104 static int suspend_prepare(void)
108 if (!suspend_ops || !suspend_ops->enter)
111 pm_prepare_console();
113 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
117 error = usermodehelper_disable();
121 error = suspend_freeze_processes();
125 suspend_thaw_processes();
126 usermodehelper_enable();
128 pm_notifier_call_chain(PM_POST_SUSPEND);
129 pm_restore_console();
/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}
/* default implementation */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}
/*
 * Debug trace marker used in the suspend path.  Compiled out on
 * EXYNOS4210 — NOTE(review): reconstructed #else/#endif; confirm against
 * the vendor tree.
 */
#if !defined(CONFIG_CPU_EXYNOS4210)
#define CHECK_POINT printk(KERN_DEBUG "%s:%d\n", __func__, __LINE__)
#else
#define CHECK_POINT do { } while (0)
#endif
152 * suspend_enter - enter the desired system sleep state.
153 * @state: State to enter
154 * @wakeup: Returns information that suspend should not be entered again.
156 * This function should be called after devices have been suspended.
158 static int suspend_enter(suspend_state_t state, bool *wakeup)
164 if (suspend_ops->prepare) {
165 error = suspend_ops->prepare();
167 goto Platform_finish;
172 error = dpm_suspend_noirq(PMSG_SUSPEND);
174 printk(KERN_ERR "PM: Some devices failed to power down\n");
175 goto Platform_finish;
180 if (suspend_ops->prepare_late) {
181 error = suspend_ops->prepare_late();
186 if (suspend_test(TEST_PLATFORM))
189 error = disable_nonboot_cpus();
190 if (error || suspend_test(TEST_CPUS))
195 arch_suspend_disable_irqs();
196 BUG_ON(!irqs_disabled());
198 error = syscore_suspend();
203 *wakeup = pm_wakeup_pending();
204 if (!(suspend_test(TEST_CORE) || *wakeup)) {
205 error = suspend_ops->enter(state);
206 events_check_enabled = false;
211 arch_suspend_enable_irqs();
212 BUG_ON(irqs_disabled());
215 enable_nonboot_cpus();
218 if (suspend_ops->wake)
221 dpm_resume_noirq(PMSG_RESUME);
224 if (suspend_ops->finish)
225 suspend_ops->finish();
231 * suspend_devices_and_enter - suspend devices and enter the desired system
233 * @state: state to enter
235 int suspend_devices_and_enter(suspend_state_t state)
243 trace_machine_suspend(state);
244 if (suspend_ops->begin) {
245 error = suspend_ops->begin(state);
251 suspend_test_start();
252 error = dpm_suspend_start(PMSG_SUSPEND);
254 printk(KERN_ERR "PM: Some devices failed to suspend\n");
255 goto Recover_platform;
257 suspend_test_finish("suspend devices");
258 if (suspend_test(TEST_DEVICES))
259 goto Recover_platform;
262 error = suspend_enter(state, &wakeup);
263 } while (!error && !wakeup
264 && suspend_ops->suspend_again && suspend_ops->suspend_again());
267 suspend_test_start();
268 dpm_resume_end(PMSG_RESUME);
269 suspend_test_finish("resume devices");
273 if (suspend_ops->end)
275 trace_machine_suspend(PWR_EVENT_EXIT);
279 if (suspend_ops->recover)
280 suspend_ops->recover();
285 * suspend_finish - Do final work before exiting suspend sequence.
287 * Call platform code to clean up, restart processes, and free the
288 * console that we've allocated. This is not called for suspend-to-disk.
290 static void suspend_finish(void)
292 suspend_thaw_processes();
293 usermodehelper_enable();
294 pm_notifier_call_chain(PM_POST_SUSPEND);
295 pm_restore_console();
298 #ifdef CONFIG_PM_WATCHDOG_TIMEOUT
299 void pm_wd_timeout(unsigned long data)
301 struct pm_wd_data *wd_data = (void *)data;
302 struct task_struct *tsk = wd_data->tsk;
304 pr_emerg("%s: PM watchdog timeout: %d seconds\n", __func__,
307 pr_emerg("stack:\n");
308 show_stack(tsk, NULL);
313 void pm_wd_add_timer(struct timer_list *timer, struct pm_wd_data *data,
316 data->timeout = timeout;
317 data->tsk = get_current();
318 init_timer_on_stack(timer);
319 timer->expires = jiffies + HZ * data->timeout;
320 timer->function = pm_wd_timeout;
321 timer->data = (unsigned long)data;
325 void pm_wd_del_timer(struct timer_list *timer)
327 del_timer_sync(timer);
328 destroy_timer_on_stack(timer);
333 * enter_state - Do common work of entering low-power state.
334 * @state: pm_state structure for state we're entering.
336 * Make sure we're the only ones trying to enter a sleep state. Fail
337 * if someone has beat us to it, since we don't want anything weird to
338 * happen when we wake up.
339 * Then, do the setup for suspend, enter the state, and cleaup (after
342 int enter_state(suspend_state_t state)
345 struct timer_list timer;
346 struct pm_wd_data data;
348 if (!valid_state(state))
351 if (!mutex_trylock(&pm_mutex))
354 printk(KERN_INFO "PM: Syncing filesystems ... ");
358 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
359 error = suspend_prepare();
363 if (suspend_test(TEST_FREEZER))
366 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
367 pm_restrict_gfp_mask();
368 error = suspend_devices_and_enter(state);
369 pm_restore_gfp_mask();
372 pm_wd_add_timer(&timer, &data, 15);
374 pr_debug("PM: Finishing wakeup.\n");
377 pm_wd_del_timer(&timer);
379 mutex_unlock(&pm_mutex);
384 * pm_suspend - Externally visible function for suspending system.
385 * @state: Enumerated value of state to enter.
387 * Determine whether or not value is within range, get state
388 * structure, and enter (above).
390 int pm_suspend(suspend_state_t state)
392 if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
393 return enter_state(state);
396 EXPORT_SYMBOL(pm_suspend);