/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
11 #include <linux/string.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/console.h>
16 #include <linux/cpu.h>
17 #include <linux/syscalls.h>
18 #include <linux/gfp.h>
20 #include <linux/kernel.h>
21 #include <linux/list.h>
23 #include <linux/slab.h>
24 #include <linux/suspend.h>
25 #include <linux/syscore_ops.h>
26 #include <trace/events/power.h>
30 const char *const pm_states[PM_SUSPEND_MAX] = {
31 #ifdef CONFIG_EARLYSUSPEND
32 [PM_SUSPEND_ON] = "on",
33 #elif defined(CONFIG_PARTIALSUSPEND_SLP)
34 [PM_SUSPEND_ON] = "post_resume",
36 [PM_SUSPEND_STANDBY] = "standby",
37 [PM_SUSPEND_MEM] = "mem",
38 #ifdef CONFIG_PARTIALSUSPEND_SLP
39 [PM_SUSPEND_PRE] = "pre_suspend"
43 static const struct platform_suspend_ops *suspend_ops;
46 * suspend_set_ops - Set the global suspend method table.
47 * @ops: Pointer to ops structure.
49 void suspend_set_ops(const struct platform_suspend_ops *ops)
51 mutex_lock(&pm_mutex);
53 mutex_unlock(&pm_mutex);
56 bool valid_state(suspend_state_t state)
59 * All states need lowlevel support and need to be valid to the lowlevel
60 * implementation, no valid callback implies that none are valid.
62 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
66 * suspend_valid_only_mem - generic memory-only valid callback
68 * Platform drivers that implement mem suspend only and only need
69 * to check for that in their .valid callback can use this instead
70 * of rolling their own .valid callback.
72 int suspend_valid_only_mem(suspend_state_t state)
74 return state == PM_SUSPEND_MEM;
#ifdef CONFIG_PARTIALSUSPEND_SLP
/**
 * suspend_valid_partialsuspend - .valid callback for partial-suspend platforms
 * @state: Suspend state to check.
 *
 * Accepts "mem" plus the vendor PM_SUSPEND_PRE/PM_SUSPEND_ON pseudo-states
 * used by the partial-suspend (SLP) scheme.
 */
int suspend_valid_partialsuspend(suspend_state_t state)
{
	return ((state == PM_SUSPEND_MEM) || (state == PM_SUSPEND_PRE)
		|| (state == PM_SUSPEND_ON));
}
#endif
/*
 * suspend_test - Pause at a pm_test checkpoint when debugging is enabled.
 * @level: Checkpoint being passed (TEST_FREEZER ... TEST_CORE).
 *
 * Returns 1 when the run should abort at this checkpoint (after a 5 s
 * delay so the tester can observe the system state), 0 otherwise.
 */
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}
98 * suspend_prepare - Do prep work before entering low-power state.
100 * This is common code that is called for each state that we're entering.
101 * Run suspend notifiers, allocate a console and stop all processes.
103 static int suspend_prepare(void)
107 if (!suspend_ops || !suspend_ops->enter)
110 pm_prepare_console();
112 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
116 error = usermodehelper_disable();
120 error = suspend_freeze_processes();
124 suspend_thaw_processes();
125 usermodehelper_enable();
127 pm_notifier_call_chain(PM_POST_SUSPEND);
128 pm_restore_console();
/* default implementation; arch code may override this weak symbol */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}
/* default implementation; arch code may override this weak symbol */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}
/*
 * Vendor debug tracing macro: prints function/line checkpoints while
 * suspending.  NOTE(review): the non-EXYNOS4210 branch is the only one
 * visible here; the #else body is assumed empty — confirm against the
 * vendor tree.
 */
#if !defined(CONFIG_CPU_EXYNOS4210)
#define CHECK_POINT printk(KERN_DEBUG "%s:%d\n", __func__, __LINE__)
#else
#define CHECK_POINT do { } while (0)
#endif
151 * suspend_enter - enter the desired system sleep state.
152 * @state: State to enter
153 * @wakeup: Returns information that suspend should not be entered again.
155 * This function should be called after devices have been suspended.
157 static int suspend_enter(suspend_state_t state, bool *wakeup)
163 if (suspend_ops->prepare) {
164 error = suspend_ops->prepare();
166 goto Platform_finish;
171 error = dpm_suspend_noirq(PMSG_SUSPEND);
173 printk(KERN_ERR "PM: Some devices failed to power down\n");
174 goto Platform_finish;
179 if (suspend_ops->prepare_late) {
180 error = suspend_ops->prepare_late();
185 if (suspend_test(TEST_PLATFORM))
188 error = disable_nonboot_cpus();
189 if (error || suspend_test(TEST_CPUS))
194 arch_suspend_disable_irqs();
195 BUG_ON(!irqs_disabled());
197 error = syscore_suspend();
202 *wakeup = pm_wakeup_pending();
203 if (!(suspend_test(TEST_CORE) || *wakeup)) {
204 error = suspend_ops->enter(state);
205 events_check_enabled = false;
210 arch_suspend_enable_irqs();
211 BUG_ON(irqs_disabled());
214 enable_nonboot_cpus();
217 if (suspend_ops->wake)
220 dpm_resume_noirq(PMSG_RESUME);
223 if (suspend_ops->finish)
224 suspend_ops->finish();
230 * suspend_devices_and_enter - suspend devices and enter the desired system
232 * @state: state to enter
234 int suspend_devices_and_enter(suspend_state_t state)
242 trace_machine_suspend(state);
243 if (suspend_ops->begin) {
244 error = suspend_ops->begin(state);
249 suspend_test_start();
250 error = dpm_suspend_start(PMSG_SUSPEND);
252 printk(KERN_ERR "PM: Some devices failed to suspend\n");
253 goto Recover_platform;
255 suspend_test_finish("suspend devices");
256 if (suspend_test(TEST_DEVICES))
257 goto Recover_platform;
260 error = suspend_enter(state, &wakeup);
261 } while (!error && !wakeup
262 && suspend_ops->suspend_again && suspend_ops->suspend_again());
265 suspend_test_start();
266 dpm_resume_end(PMSG_RESUME);
267 suspend_test_finish("resume devices");
270 if (suspend_ops->end)
272 trace_machine_suspend(PWR_EVENT_EXIT);
276 if (suspend_ops->recover)
277 suspend_ops->recover();
282 * suspend_finish - Do final work before exiting suspend sequence.
284 * Call platform code to clean up, restart processes, and free the
285 * console that we've allocated. This is not called for suspend-to-disk.
287 static void suspend_finish(void)
289 suspend_thaw_processes();
290 usermodehelper_enable();
291 pm_notifier_call_chain(PM_POST_SUSPEND);
292 pm_restore_console();
#ifdef CONFIG_PM_WATCHDOG_TIMEOUT
/*
 * pm_wd_timeout - PM watchdog timer handler.
 * @data: Pointer (cast to unsigned long) to the on-stack pm_wd_data.
 *
 * Fires when the wakeup path has not completed within the configured
 * timeout; dumps the stuck task's stack and panics so the hang is
 * caught instead of leaving the device wedged.
 */
void pm_wd_timeout(unsigned long data)
{
	struct pm_wd_data *wd_data = (void *)data;
	struct task_struct *tsk = wd_data->tsk;

	pr_emerg("%s: PM watchdog timeout: %d seconds\n", __func__,
		 wd_data->timeout);

	pr_emerg("stack:\n");
	show_stack(tsk, NULL);

	BUG();
}

/*
 * pm_wd_add_timer - Arm an on-stack PM watchdog timer.
 * @timer:   On-stack timer to initialize and arm.
 * @data:    On-stack payload recording the current task and timeout.
 * @timeout: Timeout in seconds.
 */
void pm_wd_add_timer(struct timer_list *timer, struct pm_wd_data *data,
			int timeout)
{
	data->timeout = timeout;
	data->tsk = get_current();
	init_timer_on_stack(timer);
	timer->expires = jiffies + HZ * data->timeout;
	timer->function = pm_wd_timeout;
	timer->data = (unsigned long)data;
	add_timer(timer);
}

/*
 * pm_wd_del_timer - Disarm and destroy an on-stack PM watchdog timer.
 * @timer: Timer previously armed by pm_wd_add_timer().
 */
void pm_wd_del_timer(struct timer_list *timer)
{
	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
/*
 * NOTE(review): stubs reconstructed — enter_state() uses these
 * unconditionally, so no-op versions must exist when the watchdog is
 * configured out.  Confirm against the vendor tree.
 */
static inline void pm_wd_add_timer(struct timer_list *timer,
			struct pm_wd_data *data, int timeout) { }
static inline void pm_wd_del_timer(struct timer_list *timer) { }
#endif
330 * enter_state - Do common work of entering low-power state.
331 * @state: pm_state structure for state we're entering.
333 * Make sure we're the only ones trying to enter a sleep state. Fail
334 * if someone has beat us to it, since we don't want anything weird to
335 * happen when we wake up.
336 * Then, do the setup for suspend, enter the state, and cleaup (after
339 int enter_state(suspend_state_t state)
342 struct timer_list timer;
343 struct pm_wd_data data;
345 if (!valid_state(state))
348 if (!mutex_trylock(&pm_mutex))
351 printk(KERN_INFO "PM: Syncing filesystems ... ");
355 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
356 error = suspend_prepare();
360 if (suspend_test(TEST_FREEZER))
363 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
364 pm_restrict_gfp_mask();
365 error = suspend_devices_and_enter(state);
366 pm_restore_gfp_mask();
369 pm_wd_add_timer(&timer, &data, 15);
371 pr_debug("PM: Finishing wakeup.\n");
374 pm_wd_del_timer(&timer);
376 mutex_unlock(&pm_mutex);
381 * pm_suspend - Externally visible function for suspending system.
382 * @state: Enumerated value of state to enter.
384 * Determine whether or not value is within range, get state
385 * structure, and enter (above).
387 int pm_suspend(suspend_state_t state)
389 if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
390 return enter_state(state);
393 EXPORT_SYMBOL(pm_suspend);