kernel/power/process.c [platform/kernel/linux-starfive.git]
// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>

/*
 * Timeout for stopping processes
 */
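/* Tunable at run time via /sys/power/pm_freeze_timeout (see kernel/power/main.c). */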
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			todo++;
		}
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_all_workqueues();

		if (!wakeup || pm_debug_messages_on) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, returns -errno and the system is
 * fully thawed.
 */
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		static_branch_inc(&freezer_active);

	pm_wakeup_clear(0);
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks.  There is no guarantee that OOM victims will
	 * ever go away, so we have to wait for them with a timeout.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, returns -errno and only the kernel
 * threads are thawed, so that the caller gets a chance to do additional
 * cleanups (if any) before thawing the userspace tasks.  It is therefore the
 * caller's responsibility to thaw the userspace tasks when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	pr_info("Freezing remaining freezable tasks ... ");

	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		pr_cont("done.");

	pr_cont("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}
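
/*
 * Illustrative sketch (not part of the original file): how a suspend-style
 * caller might pair the freeze/thaw entry points above, following the
 * contracts documented in the kerneldoc comments.  The helper name
 * example_suspend_flow() is hypothetical; the calls themselves are the
 * real entry points declared in <linux/freezer.h>.
 */
#if 0	/* illustrative only, not compiled */
static int example_suspend_flow(void)
{
	int error;

	error = freeze_processes();	/* on failure, everything is already thawed */
	if (error)
		return error;

	error = freeze_kernel_threads();
	if (error)
		goto thaw;		/* kernel threads thawed; user space is not */

	/* ... enter the sleep state and resume here ... */

thaw:
	/* The task that froze user space is responsible for thawing it. */
	thaw_processes();
	return error;
}
#endif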

void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		static_branch_dec(&freezer_active);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & PF_KTHREAD)
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}