/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>

#include "power.h"

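/*
 * Labels for the sleep states exposed through /sys/power/state; an entry
 * is only usable if valid_state() reports the corresponding state as
 * supported by the platform.
 */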
const char *const pm_states[PM_SUSPEND_MAX] = {
#ifdef CONFIG_EARLYSUSPEND
        [PM_SUSPEND_ON]         = "on",
#elif defined(CONFIG_PARTIALSUSPEND_SLP)
        [PM_SUSPEND_ON]         = "post_resume",
#endif
        [PM_SUSPEND_STANDBY]    = "standby",
        [PM_SUSPEND_MEM]        = "mem",
#ifdef CONFIG_PARTIALSUSPEND_SLP
        [PM_SUSPEND_PRE]        = "pre_suspend"
#endif
};

static const struct platform_suspend_ops *suspend_ops;

/**
 *      suspend_set_ops - Set the global suspend method table.
 *      @ops:   Pointer to ops structure.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
        mutex_lock(&pm_mutex);
        suspend_ops = ops;
        mutex_unlock(&pm_mutex);
}
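
/*
 * Illustrative sketch only: a platform driver would typically register
 * its callbacks once at init time, along the lines of
 *
 *      static const struct platform_suspend_ops foo_suspend_ops = {
 *              .valid  = suspend_valid_only_mem,
 *              .enter  = foo_suspend_enter,
 *      };
 *      suspend_set_ops(&foo_suspend_ops);
 *
 * where "foo_suspend_ops" and "foo_suspend_enter" are hypothetical names.
 */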

bool valid_state(suspend_state_t state)
{
        /*
         * All states need low-level support and must be valid to the
         * low-level implementation; a missing valid() callback implies
         * that no states are valid.
         */
        return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/**
 * suspend_valid_only_mem - generic memory-only valid callback
 *
 * Platform drivers that implement mem suspend only and need only check
 * for that in their .valid() callback can use this instead of rolling
 * their own.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM;
}

#ifdef CONFIG_PARTIALSUSPEND_SLP
int suspend_valid_partialsuspend(suspend_state_t state)
{
        return ((state == PM_SUSPEND_MEM) || (state == PM_SUSPEND_PRE)
                || (state == PM_SUSPEND_ON));
}
#endif

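/*
 * suspend_test - pause at the given point in the suspend sequence.
 *
 * With CONFIG_PM_DEBUG, pm_test_level is selected through
 * /sys/power/pm_test; when the selected test level is reached, the
 * sequence waits five seconds and then backs out instead of going any
 * deeper into suspend.
 */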
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
        if (pm_test_level == level) {
                printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
                mdelay(5000);
                return 1;
        }
#endif /* CONFIG_PM_DEBUG */
        return 0;
}

/**
 *      suspend_prepare - Do prep work before entering low-power state.
 *
 *      This is common code that is called for each state that we're entering.
 *      Run suspend notifiers, allocate a console and stop all processes.
 */
static int suspend_prepare(void)
{
        int error;

        if (!suspend_ops || !suspend_ops->enter)
                return -EPERM;

        pm_prepare_console();

        error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
        if (error)
                goto Finish;

        error = usermodehelper_disable();
        if (error)
                goto Finish;

        error = suspend_freeze_processes();
        if (!error)
                return 0;

        suspend_thaw_processes();
        usermodehelper_enable();
 Finish:
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
        return error;
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
        local_irq_disable();
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
        local_irq_enable();
}

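/*
 * CHECK_POINT prints a "function:line" breadcrumb at each step of
 * suspend_enter() so that a hang can be narrowed down from the console
 * log.  It is compiled out when CONFIG_CPU_EXYNOS4210 is set.
 */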
#if !defined(CONFIG_CPU_EXYNOS4210)
#define CHECK_POINT printk(KERN_DEBUG "%s:%d\n", __func__, __LINE__)
#else
#define CHECK_POINT
#endif

/**
 * suspend_enter - enter the desired system sleep state.
 * @state: State to enter
 * @wakeup: Set to true if a wakeup event is pending, in which case suspend
 *          should not be entered again.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
        int error;

        CHECK_POINT;

        if (suspend_ops->prepare) {
                error = suspend_ops->prepare();
                if (error)
                        goto Platform_finish;
        }

        CHECK_POINT;

        error = dpm_suspend_noirq(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: Some devices failed to power down\n");
                goto Platform_finish;
        }

        CHECK_POINT;

        if (suspend_ops->prepare_late) {
                error = suspend_ops->prepare_late();
                if (error)
                        goto Platform_wake;
        }

        if (suspend_test(TEST_PLATFORM))
                goto Platform_wake;

        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;

        CHECK_POINT;

        arch_suspend_disable_irqs();
        BUG_ON(!irqs_disabled());

        error = syscore_suspend();

        CHECK_POINT;

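        /*
         * At this point only the boot CPU is running with interrupts off.
         * Check once more for pending wakeup events before handing control
         * to the platform's enter() callback.
         */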
        if (!error) {
                *wakeup = pm_wakeup_pending();
                if (!(suspend_test(TEST_CORE) || *wakeup)) {
                        error = suspend_ops->enter(state);
                        events_check_enabled = false;
                }
                syscore_resume();
        }

        arch_suspend_enable_irqs();
        BUG_ON(irqs_disabled());

 Enable_cpus:
        enable_nonboot_cpus();

 Platform_wake:
        if (suspend_ops->wake)
                suspend_ops->wake();

        dpm_resume_noirq(PMSG_RESUME);

 Platform_finish:
        if (suspend_ops->finish)
                suspend_ops->finish();

        return error;
}

/**
 *      suspend_devices_and_enter - suspend devices and enter the desired system
 *                                  sleep state.
 *      @state:           state to enter
 */
int suspend_devices_and_enter(suspend_state_t state)
{
        int error;
        bool wakeup = false;

        if (!suspend_ops)
                return -ENOSYS;

        trace_machine_suspend(state);
        if (suspend_ops->begin) {
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
        }
        suspend_console();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: Some devices failed to suspend\n");
                goto Recover_platform;
        }
        suspend_test_finish("suspend devices");
        if (suspend_test(TEST_DEVICES))
                goto Recover_platform;

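        /*
         * If the platform provides suspend_again() and it returns true after
         * a cycle that ended without an error or a wakeup event, go straight
         * back to sleep without resuming devices (e.g. for periodic
         * housekeeping wakeups).
         */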
        do {
                error = suspend_enter(state, &wakeup);
        } while (!error && !wakeup
                && suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
        resume_console();
 Close:
        if (suspend_ops->end)
                suspend_ops->end();
        trace_machine_suspend(PWR_EVENT_EXIT);
        return error;

 Recover_platform:
        if (suspend_ops->recover)
                suspend_ops->recover();
        goto Resume_devices;
}

/**
 *      suspend_finish - Do final work before exiting suspend sequence.
 *
 *      Call platform code to clean up, restart processes, and free the
 *      console that we've allocated. This is not called for suspend-to-disk.
 */
static void suspend_finish(void)
{
        suspend_thaw_processes();
        usermodehelper_enable();
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
}

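/*
 * PM watchdog: pm_wd_add_timer() arms a timer on the caller's stack; if the
 * guarded section has not called pm_wd_del_timer() before the timeout
 * expires, pm_wd_timeout() dumps the task's stack and triggers BUG() so a
 * stuck wakeup is caught rather than hanging silently.
 */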
#ifdef CONFIG_PM_WATCHDOG_TIMEOUT
void pm_wd_timeout(unsigned long data)
{
        struct pm_wd_data *wd_data = (void *)data;
        struct task_struct *tsk = wd_data->tsk;

        pr_emerg("%s: PM watchdog timeout: %d seconds\n", __func__,
                        wd_data->timeout);

        pr_emerg("stack:\n");
        show_stack(tsk, NULL);

        BUG();
}

void pm_wd_add_timer(struct timer_list *timer, struct pm_wd_data *data,
                        int timeout)
{
        data->timeout = timeout;
        data->tsk = get_current();
        init_timer_on_stack(timer);
        timer->expires = jiffies + HZ * data->timeout;
        timer->function = pm_wd_timeout;
        timer->data = (unsigned long)data;
        add_timer(timer);
}

void pm_wd_del_timer(struct timer_list *timer)
{
        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#endif

/**
 *      enter_state - Do common work of entering low-power state.
 *      @state:         System sleep state to enter.
 *
 *      Make sure we're the only ones trying to enter a sleep state. Fail
 *      if someone has beaten us to it, since we don't want anything weird to
 *      happen when we wake up.
 *      Then, do the setup for suspend, enter the state, and clean up (after
 *      we've woken up).
 */
int enter_state(suspend_state_t state)
{
        int error;
        struct timer_list timer;
        struct pm_wd_data data;

        if (!valid_state(state))
                return -ENODEV;

        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;

        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
        printk("done.\n");

        pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
        error = suspend_prepare();
        if (error)
                goto Unlock;

        if (suspend_test(TEST_FREEZER))
                goto Finish;

        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
        pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
        pm_restore_gfp_mask();

 Finish:
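        /*
         * Guard the wakeup/thaw path with a 15 second watchdog.  When
         * CONFIG_PM_WATCHDOG_TIMEOUT is not set, these calls are assumed to
         * be no-op stubs provided elsewhere (e.g. in power.h).
         */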
        pm_wd_add_timer(&timer, &data, 15);

        pr_debug("PM: Finishing wakeup.\n");
        suspend_finish();

        pm_wd_del_timer(&timer);
 Unlock:
        mutex_unlock(&pm_mutex);
        return error;
}

/**
 *      pm_suspend - Externally visible function for suspending system.
 *      @state:         Enumerated value of state to enter.
 *
 *      Check that the requested state is within range and, if so, enter
 *      it via enter_state() above.
 */
int pm_suspend(suspend_state_t state)
{
        if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
                return enter_state(state);
        return -EINVAL;
}
EXPORT_SYMBOL(pm_suspend);