kernel/linux-3.0.git: kernel/power/suspend.c (tizen 2.3.1 release)
/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>

#include "power.h"

const char *const pm_states[PM_SUSPEND_MAX] = {
#ifdef CONFIG_EARLYSUSPEND
        [PM_SUSPEND_ON]         = "on",
#elif defined(CONFIG_PARTIALSUSPEND_SLP)
        [PM_SUSPEND_ON]         = "post_resume",
#endif
        [PM_SUSPEND_STANDBY]    = "standby",
        [PM_SUSPEND_MEM]        = "mem",
#ifdef CONFIG_PARTIALSUSPEND_SLP
        [PM_SUSPEND_PRE]        = "pre_suspend"
#endif
};

static const struct platform_suspend_ops *suspend_ops;

/**
 *      suspend_set_ops - Set the global suspend method table.
 *      @ops:   Pointer to ops structure.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
        mutex_lock(&pm_mutex);
        suspend_ops = ops;
        mutex_unlock(&pm_mutex);
}
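
/*
 * Illustrative sketch (not part of this file): a platform's PM code
 * typically registers its callbacks once during boot, e.g. from an
 * initcall.  The foo_* names below are hypothetical placeholders.
 *
 *	static const struct platform_suspend_ops foo_suspend_ops = {
 *		.valid	= foo_suspend_valid,
 *		.enter	= foo_suspend_enter,
 *	};
 *
 *	static int __init foo_pm_init(void)
 *	{
 *		suspend_set_ops(&foo_suspend_ops);
 *		return 0;
 *	}
 *	late_initcall(foo_pm_init);
 */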

bool valid_state(suspend_state_t state)
{
        /*
         * All states need lowlevel support and need to be valid to the lowlevel
         * implementation; no valid callback implies that none are valid.
         */
        return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/**
 * suspend_valid_only_mem - generic memory-only valid callback
 *
 * Platform drivers that implement mem suspend only and only need
 * to check for that in their .valid callback can use this instead
 * of rolling their own .valid callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM;
}
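
/*
 * Illustrative sketch (not part of this file): a platform that only
 * supports suspend-to-RAM can use the helper above directly as its
 * .valid callback; bar_suspend_enter is a hypothetical placeholder.
 *
 *	static const struct platform_suspend_ops bar_suspend_ops = {
 *		.valid	= suspend_valid_only_mem,
 *		.enter	= bar_suspend_enter,
 *	};
 */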

#ifdef CONFIG_PARTIALSUSPEND_SLP
int suspend_valid_partialsuspend(suspend_state_t state)
{
        return ((state == PM_SUSPEND_MEM) || (state == PM_SUSPEND_PRE)
                || (state == PM_SUSPEND_ON));
}
#endif

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
        if (pm_test_level == level) {
                printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
                mdelay(5000);
                return 1;
        }
#endif /* !CONFIG_PM_DEBUG */
        return 0;
}
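
/*
 * Note added for clarity: pm_test_level is controlled by the PM test
 * facility.  With CONFIG_PM_DEBUG enabled, writing "freezer", "devices",
 * "platform", "processors" or "core" to /sys/power/pm_test makes the
 * matching suspend_test() call return 1, so a subsequent suspend pauses
 * for five seconds at that stage and then unwinds instead of entering
 * the sleep state.  For example, from user space:
 *
 *	# echo devices > /sys/power/pm_test
 *	# echo mem > /sys/power/state
 */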

/**
 *      suspend_prepare - Do prep work before entering low-power state.
 *
 *      This is common code that is called for each state that we're entering.
 *      Run suspend notifiers, allocate a console and stop all processes.
 */
static int suspend_prepare(void)
{
        int error;

        if (!suspend_ops || !suspend_ops->enter)
                return -EPERM;

        pm_prepare_console();

        error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
        if (error)
                goto Finish;

        error = usermodehelper_disable();
        if (error)
                goto Finish;

        error = suspend_freeze_processes();
        if (!error)
                return 0;

        suspend_thaw_processes();
        usermodehelper_enable();
 Finish:
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
        return error;
}
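
/*
 * Illustrative sketch (not part of this file): other kernel code can hook
 * the PM_SUSPEND_PREPARE / PM_POST_SUSPEND notifications sent above by
 * registering a PM notifier; the foo_* helpers are hypothetical.
 *
 *	static int foo_pm_notify(struct notifier_block *nb,
 *				 unsigned long event, void *unused)
 *	{
 *		if (event == PM_SUSPEND_PREPARE)
 *			foo_quiesce();
 *		else if (event == PM_POST_SUSPEND)
 *			foo_restart();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_pm_nb = {
 *		.notifier_call = foo_pm_notify,
 *	};
 *
 *	register_pm_notifier(&foo_pm_nb);
 */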

/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
        local_irq_disable();
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
        local_irq_enable();
}
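
/*
 * Illustrative sketch (not part of this file): an architecture that needs
 * extra work around interrupt disabling can override the weak defaults
 * above with non-weak definitions of the same symbols; the body below is
 * hypothetical.
 *
 *	void arch_suspend_disable_irqs(void)
 *	{
 *		foo_arch_mask_wakeup_irqs();
 *		local_irq_disable();
 *	}
 */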

#if !defined(CONFIG_CPU_EXYNOS4210)
#define CHECK_POINT printk(KERN_DEBUG "%s:%d\n", __func__, __LINE__)
#else
#define CHECK_POINT
#endif

/**
 * suspend_enter - enter the desired system sleep state.
 * @state: State to enter
 * @wakeup: Set to true if a wakeup event is pending, in which case suspend
 * should not be entered again.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
        int error;

        CHECK_POINT;

        if (suspend_ops->prepare) {
                error = suspend_ops->prepare();
                if (error)
                        goto Platform_finish;
        }

        CHECK_POINT;

        error = dpm_suspend_noirq(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: Some devices failed to power down\n");
                goto Platform_finish;
        }

        CHECK_POINT;

        if (suspend_ops->prepare_late) {
                error = suspend_ops->prepare_late();
                if (error)
                        goto Platform_wake;
        }

        if (suspend_test(TEST_PLATFORM))
                goto Platform_wake;

        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;

        CHECK_POINT;

        arch_suspend_disable_irqs();
        BUG_ON(!irqs_disabled());

        error = syscore_suspend();

        CHECK_POINT;

        if (!error) {
                *wakeup = pm_wakeup_pending();
                if (!(suspend_test(TEST_CORE) || *wakeup)) {
                        error = suspend_ops->enter(state);
                        events_check_enabled = false;
                }
                syscore_resume();
        }

        arch_suspend_enable_irqs();
        BUG_ON(irqs_disabled());

 Enable_cpus:
        enable_nonboot_cpus();

 Platform_wake:
        if (suspend_ops->wake)
                suspend_ops->wake();

        dpm_resume_noirq(PMSG_RESUME);

 Platform_finish:
        if (suspend_ops->finish)
                suspend_ops->finish();

        return error;
}

/**
 *      suspend_devices_and_enter - suspend devices and enter the desired system
 *                                  sleep state.
 *      @state:           state to enter
 */
int suspend_devices_and_enter(suspend_state_t state)
{
        int error;
        bool wakeup = false;

        if (!suspend_ops)
                return -ENOSYS;

        trace_machine_suspend(state);
        if (suspend_ops->begin) {
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
        }
        suspend_console();
        ftrace_stop();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: Some devices failed to suspend\n");
                goto Recover_platform;
        }
        suspend_test_finish("suspend devices");
        if (suspend_test(TEST_DEVICES))
                goto Recover_platform;

        do {
                error = suspend_enter(state, &wakeup);
        } while (!error && !wakeup
                && suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
        ftrace_start();
        resume_console();
 Close:
        if (suspend_ops->end)
                suspend_ops->end();
        trace_machine_suspend(PWR_EVENT_EXIT);
        return error;

 Recover_platform:
        if (suspend_ops->recover)
                suspend_ops->recover();
        goto Resume_devices;
}
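
/*
 * Illustrative sketch (not part of this file): the suspend_again loop above
 * lets a platform re-enter suspend after a transient wakeup without fully
 * resuming devices in between.  A hypothetical callback could look like:
 *
 *	static bool foo_suspend_again(void)
 *	{
 *		return !foo_wakeup_needs_userspace();
 *	}
 *
 * wired up through the .suspend_again member of the platform's
 * struct platform_suspend_ops before it is passed to suspend_set_ops().
 */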

/**
 *      suspend_finish - Do final work before exiting suspend sequence.
 *
 *      Call platform code to clean up, restart processes, and free the
 *      console that we've allocated. This is not called for suspend-to-disk.
 */
static void suspend_finish(void)
{
        suspend_thaw_processes();
        usermodehelper_enable();
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
}

#ifdef CONFIG_PM_WATCHDOG_TIMEOUT
void pm_wd_timeout(unsigned long data)
{
        struct pm_wd_data *wd_data = (void *)data;
        struct task_struct *tsk = wd_data->tsk;

        pr_emerg("%s: PM watchdog timeout: %d seconds\n", __func__,
                        wd_data->timeout);

        pr_emerg("stack:\n");
        show_stack(tsk, NULL);

        BUG();
}

void pm_wd_add_timer(struct timer_list *timer, struct pm_wd_data *data,
                        int timeout)
{
        data->timeout = timeout;
        data->tsk = get_current();
        init_timer_on_stack(timer);
        timer->expires = jiffies + HZ * data->timeout;
        timer->function = pm_wd_timeout;
        timer->data = (unsigned long)data;
        add_timer(timer);
}

void pm_wd_del_timer(struct timer_list *timer)
{
        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#endif

/**
 *      enter_state - Do common work of entering low-power state.
 *      @state:         suspend state we're entering.
 *
 *      Make sure we're the only ones trying to enter a sleep state. Fail
 *      if someone has beaten us to it, since we don't want anything weird to
 *      happen when we wake up.
 *      Then, do the setup for suspend, enter the state, and clean up (after
 *      we've woken up).
 */
int enter_state(suspend_state_t state)
{
        int error;
        struct timer_list timer;
        struct pm_wd_data data;

        if (!valid_state(state))
                return -ENODEV;

        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;

        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
        printk("done.\n");

        pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
        error = suspend_prepare();
        if (error)
                goto Unlock;

        if (suspend_test(TEST_FREEZER))
                goto Finish;

        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
        pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
        pm_restore_gfp_mask();

 Finish:
        pm_wd_add_timer(&timer, &data, 15);

        pr_debug("PM: Finishing wakeup.\n");
        suspend_finish();

        pm_wd_del_timer(&timer);
 Unlock:
        mutex_unlock(&pm_mutex);
        return error;
}

/**
 *      pm_suspend - Externally visible function for suspending system.
 *      @state:         Enumerated value of state to enter.
 *
 *      Determine whether or not value is within range, get state
 *      structure, and enter (above).
 */
int pm_suspend(suspend_state_t state)
{
        if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
                return enter_state(state);
        return -EINVAL;
}
EXPORT_SYMBOL(pm_suspend);
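
/*
 * Illustrative note (not part of this file): pm_suspend() is normally
 * reached from user space via a write to /sys/power/state, e.g.
 * "echo mem > /sys/power/state".  Kernel code that needs to trigger
 * suspend-to-RAM directly could call it as in this hypothetical sketch:
 *
 *	int error = pm_suspend(PM_SUSPEND_MEM);
 *	if (error)
 *		pr_err("suspend to RAM failed: %d\n", error);
 */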