* which clears VM_LOCKED, otherwise the oom reaper cannot
* reliably test it.
*/
- mutex_lock(&oom_lock);
(void)__oom_reap_task_mm(mm);
- mutex_unlock(&oom_lock);
set_bit(MMF_OOM_SKIP, &mm->flags);
down_write(&mm->mmap_sem);

static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
- bool ret = true;
-
- /*
- * We have to make sure to not race with the victim exit path
- * and cause premature new oom victim selection:
- * oom_reap_task_mm              exit_mm
- *   mmget_not_zero
- *                                 mmput
- *                                   atomic_dec_and_test
- *                                 exit_oom_victim
- *                               [...]
- *                               out_of_memory
- *                                 select_bad_process
- *                                   # no TIF_MEMDIE task selects new victim
- *   unmap_page_range # frees some memory
- */
- mutex_lock(&oom_lock);
-
if (!down_read_trylock(&mm->mmap_sem)) {
- ret = false;
trace_skip_task_reaping(tsk->pid);
- goto unlock_oom;
+ return false;
}

/*
 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
 * work on the mm anymore. The check for MMF_OOM_SKIP must run
 * under mmap_sem for reading because it serializes against the
 * down_write();up_write() cycle in exit_mmap().
 */
if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
up_read(&mm->mmap_sem);
trace_skip_task_reaping(tsk->pid);
- goto unlock_oom;
+ return true;
}
trace_start_task_reaping(tsk->pid);
/* failed to reap part of the address space. Try again later */
if (!__oom_reap_task_mm(mm)) {
up_read(&mm->mmap_sem);
- ret = false;
- goto unlock_oom;
+ return false;
}

pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(tsk), tsk->comm,
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)));
up_read(&mm->mmap_sem);
trace_finish_task_reaping(tsk->pid);
-unlock_oom:
- mutex_unlock(&oom_lock);
- return ret;
+ return true;
}
#define MAX_OOM_REAP_RETRIES 10
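
For readers who want to see the locking idea in isolation, here is a minimal user-space sketch of the handshake the patch relies on once oom_lock is gone. It is not kernel code: a pthread rwlock stands in for mm->mmap_sem, an atomic flag stands in for MMF_OOM_SKIP, and the names fake_mm, reap, reaper and exiter are invented for illustration. It demonstrates the same ordering as the diff: the reaper only works under the read lock after checking the skip flag, and the exit path sets the flag and then takes the lock for write, which cannot succeed until any reaper that saw the flag clear has dropped its read lock.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins: mmap_sem -> pthread rwlock, MMF_OOM_SKIP -> atomic flag. */
struct fake_mm {
	pthread_rwlock_t mmap_sem;
	atomic_bool oom_skip;
};

/* Reaper side, shaped like oom_reap_task_mm() after this patch. */
static bool reap(struct fake_mm *mm)
{
	/* Lock held for write by the exit path: back off and retry later. */
	if (pthread_rwlock_tryrdlock(&mm->mmap_sem) != 0)
		return false;

	/* Checked under the read lock, like the MMF_OOM_SKIP test above. */
	if (atomic_load(&mm->oom_skip)) {
		pthread_rwlock_unlock(&mm->mmap_sem);
		return true;	/* exit path already handled this mm */
	}

	puts("reaper: unmapping under the read lock");
	pthread_rwlock_unlock(&mm->mmap_sem);
	return true;
}

/* Exit side, shaped like the tail of exit_mmap(). */
static void *exiter(void *arg)
{
	struct fake_mm *mm = arg;

	puts("exit: reaping own address space");
	atomic_store(&mm->oom_skip, true);

	/*
	 * Empty write-lock section: once it returns, any reaper that saw
	 * oom_skip == false has dropped the read lock, so the teardown
	 * below cannot race with it.
	 */
	pthread_rwlock_wrlock(&mm->mmap_sem);
	pthread_rwlock_unlock(&mm->mmap_sem);

	puts("exit: freeing page tables");
	return NULL;
}

static void *reaper(void *arg)
{
	while (!reap(arg))
		sched_yield();
	return NULL;
}

int main(void)
{
	struct fake_mm mm;
	pthread_t r, e;

	pthread_rwlock_init(&mm.mmap_sem, NULL);
	atomic_init(&mm.oom_skip, false);
	pthread_create(&r, NULL, reaper, &mm);
	pthread_create(&e, NULL, exiter, &mm);
	pthread_join(r, NULL);
	pthread_join(e, NULL);
	pthread_rwlock_destroy(&mm.mmap_sem);
	return 0;
}

Build with something like "cc -pthread sketch.c". The interleaving varies between runs, but "reaper: unmapping under the read lock" can never appear after "exit: freeing page tables", which is exactly the guarantee the kernel needs without holding oom_lock across the reap.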