extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
-extern bool oom_killer_disable(void);
+extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
/*
* Now that the whole userspace is frozen we need to disable
* the OOM killer to disallow any further interference with
- * killable tasks.
+ * killable tasks. There is no guarantee oom victims will
+ * ever reach a point where they go away, so we have to wait with a timeout.
*/
- if (!error && !oom_killer_disable())
+ if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
error = -EBUSY;
- /*
- * There is a hard to fix race between oom_reaper kernel thread
- * and oom_killer_disable. oom_reaper calls exit_oom_victim
- * before the victim reaches exit_mm so try to freeze all the tasks
- * again and catch such a left over task.
- */
- if (!error) {
- pr_info("Double checking all user space processes after OOM killer disable... ");
- error = try_to_freeze_tasks(true);
- pr_cont("\n");
- }
-
if (error)
thaw_processes();
return error;
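
The hunk above can be read as one flow. The sketch below is illustrative only (hypothetical helper name, simplified error handling), not the actual freeze_processes() body: once user space is frozen, the OOM killer is disabled with a bounded wait instead of blocking indefinitely on oom victims that may never exit.

/* Illustrative sketch only, not the merged kernel code. */
static int example_freeze_then_disable_oom(void)
{
	int error;

	/* Freeze all user space tasks first. */
	error = try_to_freeze_tasks(true);

	/*
	 * Give oom victims at most freeze_timeout_msecs to go away;
	 * on failure report -EBUSY and thaw everything again.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}
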
debug_show_all_locks();
done:
- /*
- * Clear TIF_MEMDIE because the task shouldn't be sitting on a
- * reasonably reclaimable memory anymore or it is not a good candidate
- * for the oom victim right now because it cannot release its memory
- * itself nor by the oom reaper.
- */
tsk->oom_reaper_list = NULL;
- exit_oom_victim(tsk);
/*
* Hide this mm from OOM killer because it has been either reaped or
static int oom_reaper(void *unused)
{
- set_freezable();
-
while (true) {
struct task_struct *tsk = NULL;
}
/**
+ * oom_killer_enable - enable OOM killer
+ */
+void oom_killer_enable(void)
+{
+ oom_killer_disabled = false;
+}
+
+/**
* oom_killer_disable - disable OOM killer
+ * @timeout: maximum timeout to wait for oom victims in jiffies
*
* Forces all page allocations to fail rather than trigger OOM killer.
- * Will block and wait until all OOM victims are killed.
+ * Will block and wait until all OOM victims are killed or the given
+ * timeout expires.
*
* The function cannot be called when there are runnable user tasks because
* the userspace would see unexpected allocation failures as a result. Any
* Returns true if successful and false if the OOM killer cannot be
* disabled.
*/
-bool oom_killer_disable(void)
+bool oom_killer_disable(signed long timeout)
{
+ signed long ret;
+
/*
* Make sure to not race with an ongoing OOM killer. Check that the
* current is not killed (possibly due to sharing the victim's memory).
oom_killer_disabled = true;
mutex_unlock(&oom_lock);
- wait_event(oom_victims_wait, !atomic_read(&oom_victims));
+ ret = wait_event_interruptible_timeout(oom_victims_wait,
+ !atomic_read(&oom_victims), timeout);
+ if (ret <= 0) {
+ oom_killer_enable();
+ return false;
+ }
return true;
}
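
A note on the wait above: wait_event_interruptible_timeout() returns the remaining jiffies when the condition became true, 0 when the timeout expired, and a negative value when interrupted, so the ret <= 0 branch covers both an expired timeout and a pending signal and re-enables the OOM killer before reporting failure. A minimal caller sketch follows; the helper name and the 20 second budget are made up for illustration:

/* Hypothetical caller, illustrative only. */
static int example_quiesce_oom(void)
{
	/* Give existing oom victims up to 20s to exit. */
	if (!oom_killer_disable(msecs_to_jiffies(20 * 1000)))
		return -EBUSY;

	/* ... work that must not race with new OOM kills ... */

	oom_killer_enable();
	return 0;
}
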
-/**
- * oom_killer_enable - enable OOM killer
- */
-void oom_killer_enable(void)
-{
- oom_killer_disabled = false;
-}
-
static inline bool __task_will_free_mem(struct task_struct *task)
{
struct signal_struct *sig = task->signal;