1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1998,2000 Rik van Riel
6 * Thanks go out to Claus Fischer for some serious inspiration and
7 * for goading me into coding this file...
8 * Copyright (C) 2010 Google, Inc.
9 * Rewritten by David Rientjes
11 * The routines in this file are used to kill a process when
12 * we're seriously out of memory. This gets called from __alloc_pages()
13 * in mm/page_alloc.c when we really run out of memory.
15 * Since we won't call these routines often (on a well-configured
16 * machine) this file will double as a 'coding guide' and a signpost
17 * for newbie kernel hackers. It features several pointers to major
18 * kernel subsystems and hints as to where to find out what things do.
21 #include <linux/oom.h>
23 #include <linux/err.h>
24 #include <linux/gfp.h>
25 #include <linux/sched.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/coredump.h>
28 #include <linux/sched/task.h>
29 #include <linux/swap.h>
30 #include <linux/timex.h>
31 #include <linux/jiffies.h>
32 #include <linux/cpuset.h>
33 #include <linux/export.h>
34 #include <linux/notifier.h>
35 #include <linux/memcontrol.h>
36 #include <linux/mempolicy.h>
37 #include <linux/security.h>
38 #include <linux/ptrace.h>
39 #include <linux/freezer.h>
40 #include <linux/ftrace.h>
41 #include <linux/ratelimit.h>
42 #include <linux/kthread.h>
43 #include <linux/init.h>
44 #include <linux/mmu_notifier.h>
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/oom.h>
53 int sysctl_panic_on_oom;
54 int sysctl_oom_kill_allocating_task;
55 int sysctl_oom_dump_tasks = 1;
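/*
 * These knobs are exposed to userspace as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks;
 * only oom_dump_tasks is enabled by default.
 */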
58 * Serializes oom killer invocations (out_of_memory()) from all contexts to
59 * prevent overly eager oom killing (e.g. when the oom killer is invoked
60 * from different domains).
62 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
65 DEFINE_MUTEX(oom_lock);
69 * has_intersects_mems_allowed() - check task eligibility for kill
70 * @start: task struct of which task to consider
71 * @mask: nodemask passed to page allocator for mempolicy ooms
73 * Task eligibility is determined by whether or not a candidate task, @start,
74 * shares the same mempolicy nodes as current if it is bound by such a policy
75 * and whether or not it has the same set of allowed cpuset nodes.
77 static bool has_intersects_mems_allowed(struct task_struct *start,
78 const nodemask_t *mask)
80 struct task_struct *tsk;
84 for_each_thread(start, tsk) {
87 * If this is a mempolicy constrained oom, tsk's
88 * cpuset is irrelevant. Only return true if its
89 * mempolicy intersects current, otherwise it may be needlessly killed.
92 ret = mempolicy_nodemask_intersects(tsk, mask);
95 * This is not a mempolicy constrained oom, so only
96 * check the mems of tsk's cpuset.
98 ret = cpuset_mems_allowed_intersects(current, tsk);
108 static bool has_intersects_mems_allowed(struct task_struct *tsk,
109 const nodemask_t *mask)
113 #endif /* CONFIG_NUMA */
116 * The process p may have detached its own ->mm while exiting or through
117 * use_mm(), but one or more of its subthreads may still have a valid
118 * pointer. Return p, or any of its subthreads with a valid ->mm, with task_lock() held.
121 struct task_struct *find_lock_task_mm(struct task_struct *p)
123 struct task_struct *t;
127 for_each_thread(p, t) {
141 * order == -1 means the oom kill is required by sysrq, otherwise only
142 * for display purposes.
144 static inline bool is_sysrq_oom(struct oom_control *oc)
146 return oc->order == -1;
149 static inline bool is_memcg_oom(struct oom_control *oc)
151 return oc->memcg != NULL;
154 /* return true if the task is not adequate as candidate victim task. */
155 static bool oom_unkillable_task(struct task_struct *p,
156 const nodemask_t *nodemask)
158 if (is_global_init(p))
160 if (p->flags & PF_KTHREAD)
163 /* p may not have freeable memory in nodemask */
164 if (!has_intersects_mems_allowed(p, nodemask))
171 * Print out unreclaimable slabs info when the amount of unreclaimable slab
172 * memory is greater than all user memory (LRU pages).
174 static bool is_dump_unreclaim_slabs(void)
176 unsigned long nr_lru;
178 nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
179 global_node_page_state(NR_INACTIVE_ANON) +
180 global_node_page_state(NR_ACTIVE_FILE) +
181 global_node_page_state(NR_INACTIVE_FILE) +
182 global_node_page_state(NR_ISOLATED_ANON) +
183 global_node_page_state(NR_ISOLATED_FILE) +
184 global_node_page_state(NR_UNEVICTABLE);
186 return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
190 * oom_badness - heuristic function to determine which candidate task to kill
191 * @p: task struct of the task whose badness score we should calculate
192 * @totalpages: total present RAM allowed for page allocation
193 * @nodemask: nodemask passed to page allocator for mempolicy ooms
195 * The heuristic for determining which task to kill is made to be as simple and
196 * predictable as possible. The goal is to return the highest value for the
197 * task consuming the most memory to avoid subsequent oom failures.
199 unsigned long oom_badness(struct task_struct *p,
200 const nodemask_t *nodemask, unsigned long totalpages)
205 if (oom_unkillable_task(p, nodemask))
208 p = find_lock_task_mm(p);
213 * Do not even consider tasks which are explicitly marked oom
214 * unkillable, have already been oom reaped, or are in
215 * the middle of a vfork.
217 adj = (long)p->signal->oom_score_adj;
218 if (adj == OOM_SCORE_ADJ_MIN ||
219 test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
226 * The baseline for the badness score is the proportion of RAM that each
227 * task's rss, pagetable and swap space use.
229 points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
230 mm_pgtables_bytes(p->mm) / PAGE_SIZE;
233 /* Normalize to oom_score_adj units */
234 adj *= totalpages / 1000;
238 * Never return 0 for an eligible task regardless of the root bonus and
239 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
241 return points > 0 ? points : 1;
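/*
 * Rough illustration of the arithmetic above (hypothetical numbers): with
 * totalpages = 1,000,000 pages and a task using 200,000 pages of
 * rss + swap + pagetables, an oom_score_adj of 500 contributes
 * 500 * (1,000,000 / 1000) = 500,000 extra points, for a badness of roughly
 * 700,000; an oom_score_adj of -500 would subtract the same amount.
 */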
244 static const char * const oom_constraint_text[] = {
245 [CONSTRAINT_NONE] = "CONSTRAINT_NONE",
246 [CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
247 [CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
248 [CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
252 * Determine the type of allocation constraint.
254 static enum oom_constraint constrained_alloc(struct oom_control *oc)
258 enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
259 bool cpuset_limited = false;
262 if (is_memcg_oom(oc)) {
263 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
264 return CONSTRAINT_MEMCG;
267 /* Default to all available memory */
268 oc->totalpages = totalram_pages() + total_swap_pages;
270 if (!IS_ENABLED(CONFIG_NUMA))
271 return CONSTRAINT_NONE;
274 return CONSTRAINT_NONE;
276 * We reach here only when __GFP_NOFAIL is used, so we should avoid
277 * killing current. We have to fall back to killing a random task in this case.
278 * Hopefully CONSTRAINT_THISNODE would be the right answer, but there is no way to handle it for now.
280 if (oc->gfp_mask & __GFP_THISNODE)
281 return CONSTRAINT_NONE;
284 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
285 * the page allocator means a mempolicy is in effect. Cpuset policy
286 * is enforced in get_page_from_freelist().
289 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
290 oc->totalpages = total_swap_pages;
291 for_each_node_mask(nid, *oc->nodemask)
292 oc->totalpages += node_spanned_pages(nid);
293 return CONSTRAINT_MEMORY_POLICY;
296 /* Check whether this allocation failure is caused by cpuset's wall function */
297 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
298 high_zoneidx, oc->nodemask)
299 if (!cpuset_zone_allowed(zone, oc->gfp_mask))
300 cpuset_limited = true;
302 if (cpuset_limited) {
303 oc->totalpages = total_swap_pages;
304 for_each_node_mask(nid, cpuset_current_mems_allowed)
305 oc->totalpages += node_spanned_pages(nid);
306 return CONSTRAINT_CPUSET;
308 return CONSTRAINT_NONE;
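/*
 * Summary of the checks above: memcg charge failures report CONSTRAINT_MEMCG,
 * __GFP_THISNODE allocations fall back to CONSTRAINT_NONE, a truncated
 * nodemask means CONSTRAINT_MEMORY_POLICY, a cpuset-restricted zonelist means
 * CONSTRAINT_CPUSET, and everything else is CONSTRAINT_NONE; oc->totalpages
 * is sized to match the constrained domain in each case.
 */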
311 static int oom_evaluate_task(struct task_struct *task, void *arg)
313 struct oom_control *oc = arg;
314 unsigned long points;
316 if (oom_unkillable_task(task, oc->nodemask))
320 * This task already has access to memory reserves and is being killed.
321 * Don't allow any other task to have access to the reserves unless
322 * the task has MMF_OOM_SKIP, because the chances that it would release
323 * any memory are quite low.
325 if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
326 if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
332 * If task is allocating a lot of memory and has been marked to be
333 * killed first if it triggers an oom, then select it.
335 if (oom_task_origin(task)) {
340 points = oom_badness(task, oc->nodemask, oc->totalpages);
341 if (!points || points < oc->chosen_points)
346 put_task_struct(oc->chosen);
347 get_task_struct(task);
349 oc->chosen_points = points;
354 put_task_struct(oc->chosen);
355 oc->chosen = (void *)-1UL;
360 * Simple selection loop. We choose the process with the highest number of
361 * 'points'. If the scan was aborted, oc->chosen is set to -1.
363 static void select_bad_process(struct oom_control *oc)
365 if (is_memcg_oom(oc))
366 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
368 struct task_struct *p;
372 if (oom_evaluate_task(p, oc))
377 oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
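/*
 * Convert the raw badness into a per-mille share of totalpages, i.e. the
 * same scale as the oom_score_adj units used above.
 */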
380 static int dump_task(struct task_struct *p, void *arg)
382 struct oom_control *oc = arg;
383 struct task_struct *task;
385 if (oom_unkillable_task(p, oc->nodemask))
388 task = find_lock_task_mm(p);
391 * This is a kthread or all of p's threads have already
392 * detached their mm's. There's no need to report
393 * them; they can't be oom killed anyway.
398 pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
399 task->pid, from_kuid(&init_user_ns, task_uid(task)),
400 task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
401 mm_pgtables_bytes(task->mm),
402 get_mm_counter(task->mm, MM_SWAPENTS),
403 task->signal->oom_score_adj, task->comm);
410 * dump_tasks - dump current memory state of all system tasks
411 * @oc: pointer to struct oom_control
413 * Dumps the current memory state of all eligible tasks. Tasks not in the same
414 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes are not shown.
416 * State information includes task's pid, uid, tgid, vm size, rss,
417 * pgtables_bytes, swapents, oom_score_adj value, and name.
419 static void dump_tasks(struct oom_control *oc)
421 pr_info("Tasks state (memory values in pages):\n");
422 pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
424 if (is_memcg_oom(oc))
425 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
427 struct task_struct *p;
436 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
438 /* one line summary of the oom killer context. */
439 pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
440 oom_constraint_text[oc->constraint],
441 nodemask_pr_args(oc->nodemask));
442 cpuset_print_current_mems_allowed();
443 mem_cgroup_print_oom_context(oc->memcg, victim);
444 pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
445 from_kuid(&init_user_ns, task_uid(victim)));
448 static void dump_header(struct oom_control *oc, struct task_struct *p)
450 pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
451 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
452 current->signal->oom_score_adj);
453 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
454 pr_warn("COMPACTION is disabled!!!\n");
457 if (is_memcg_oom(oc))
458 mem_cgroup_print_oom_meminfo(oc->memcg);
460 show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
461 if (is_dump_unreclaim_slabs())
462 dump_unreclaimable_slab();
464 if (sysctl_oom_dump_tasks)
467 dump_oom_summary(oc, p);
471 * Number of OOM victims in flight
473 static atomic_t oom_victims = ATOMIC_INIT(0);
474 static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
476 static bool oom_killer_disabled __read_mostly;
478 #define K(x) ((x) << (PAGE_SHIFT-10))
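/* K(x) converts a page count into kilobytes for the messages below. */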
481 * task->mm can be NULL if the task is the exited group leader. So to
482 * determine whether the task is using a particular mm, we examine all the
483 * task's threads: if one of those is using this mm then this task was also using it.
486 bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
488 struct task_struct *t;
490 for_each_thread(p, t) {
491 struct mm_struct *t_mm = READ_ONCE(t->mm);
500 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
501 * victim (if that is possible) to help the OOM killer to move on.
503 static struct task_struct *oom_reaper_th;
504 static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
505 static struct task_struct *oom_reaper_list;
506 static DEFINE_SPINLOCK(oom_reaper_lock);
508 bool __oom_reap_task_mm(struct mm_struct *mm)
510 struct vm_area_struct *vma;
514 * Tell all users of get_user/copy_from_user etc... that the content
515 * is no longer stable. No barriers really needed because unmapping
516 * should imply barriers already and the reader would hit a page fault
517 * if it stumbled over reaped memory.
519 set_bit(MMF_UNSTABLE, &mm->flags);
521 for (vma = mm->mmap ; vma; vma = vma->vm_next) {
522 if (!can_madv_dontneed_vma(vma))
526 * Only anonymous pages have a good chance to be dropped
527 * without additional steps which we cannot afford as we are OOM already.
530 * We do not even care about fs backed pages because all
531 * which are reclaimable have already been reclaimed and
532 * we do not want to block exit_mmap by keeping mm ref
533 * count elevated without a good reason.
535 if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
536 struct mmu_notifier_range range;
537 struct mmu_gather tlb;
539 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
540 vma, mm, vma->vm_start,
542 tlb_gather_mmu(&tlb, mm, range.start, range.end);
543 if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
544 tlb_finish_mmu(&tlb, range.start, range.end);
548 unmap_page_range(&tlb, vma, range.start, range.end, NULL);
549 mmu_notifier_invalidate_range_end(&range);
550 tlb_finish_mmu(&tlb, range.start, range.end);
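/*
 * The net effect is comparable to MADV_DONTNEED on each private
 * (non-VM_SHARED) mapping: the page tables are torn down and the pages
 * freed, while the vma structures themselves are left untouched.
 */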
558 * Reaps the address space of the given task.
560 * Returns true on success and false if none or only part of the address space
561 * has been reclaimed and the caller should retry later.
563 static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
567 if (!down_read_trylock(&mm->mmap_sem)) {
568 trace_skip_task_reaping(tsk->pid);
573 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
574 * work on the mm anymore. The check for MMF_OOM_SKIP must run
575 * under mmap_sem for reading because it serializes against the
576 * down_write();up_write() cycle in exit_mmap().
578 if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
579 trace_skip_task_reaping(tsk->pid);
583 trace_start_task_reaping(tsk->pid);
585 /* failed to reap part of the address space. Try again later */
586 ret = __oom_reap_task_mm(mm);
590 pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
591 task_pid_nr(tsk), tsk->comm,
592 K(get_mm_counter(mm, MM_ANONPAGES)),
593 K(get_mm_counter(mm, MM_FILEPAGES)),
594 K(get_mm_counter(mm, MM_SHMEMPAGES)));
596 trace_finish_task_reaping(tsk->pid);
598 up_read(&mm->mmap_sem);
603 #define MAX_OOM_REAP_RETRIES 10
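/*
 * With the schedule_timeout_idle(HZ/10) sleep below, the reaper retries for
 * roughly one second before declaring the mm unreapable.
 */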
604 static void oom_reap_task(struct task_struct *tsk)
607 struct mm_struct *mm = tsk->signal->oom_mm;
609 /* Retry the down_read_trylock(mmap_sem) a few times */
610 while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
611 schedule_timeout_idle(HZ/10);
613 if (attempts <= MAX_OOM_REAP_RETRIES ||
614 test_bit(MMF_OOM_SKIP, &mm->flags))
617 pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
618 task_pid_nr(tsk), tsk->comm);
619 debug_show_all_locks();
622 tsk->oom_reaper_list = NULL;
625 * Hide this mm from OOM killer because it has been either reaped or
626 * somebody can't call up_write(mmap_sem).
628 set_bit(MMF_OOM_SKIP, &mm->flags);
630 /* Drop a reference taken by wake_oom_reaper */
631 put_task_struct(tsk);
634 static int oom_reaper(void *unused)
637 struct task_struct *tsk = NULL;
639 wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
640 spin_lock(&oom_reaper_lock);
641 if (oom_reaper_list != NULL) {
642 tsk = oom_reaper_list;
643 oom_reaper_list = tsk->oom_reaper_list;
645 spin_unlock(&oom_reaper_lock);
654 static void wake_oom_reaper(struct task_struct *tsk)
656 /* mm is already queued? */
657 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
660 get_task_struct(tsk);
662 spin_lock(&oom_reaper_lock);
663 tsk->oom_reaper_list = oom_reaper_list;
664 oom_reaper_list = tsk;
665 spin_unlock(&oom_reaper_lock);
666 trace_wake_reaper(tsk->pid);
667 wake_up(&oom_reaper_wait);
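/*
 * Hand-off model: victims are pushed onto the lock-protected oom_reaper_list
 * and the single oom_reaper kthread pops and reaps them one at a time;
 * MMF_OOM_REAP_QUEUED keeps an mm from being queued twice.
 */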
670 static int __init oom_init(void)
672 oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
675 subsys_initcall(oom_init)
677 static inline void wake_oom_reaper(struct task_struct *tsk)
680 #endif /* CONFIG_MMU */
683 * mark_oom_victim - mark the given task as OOM victim
686 * Has to be called with oom_lock held and never after
687 * oom has been disabled already.
689 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable (either
690 * under task_lock or by operating on current).
692 static void mark_oom_victim(struct task_struct *tsk)
694 struct mm_struct *mm = tsk->mm;
696 WARN_ON(oom_killer_disabled);
697 /* OOM killer might race with memcg OOM */
698 if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
701 /* oom_mm is bound to the signal struct life time. */
702 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
703 mmgrab(tsk->signal->oom_mm);
704 set_bit(MMF_OOM_VICTIM, &mm->flags);
708 * Make sure that the task is woken up from uninterruptible sleep
709 * if it is frozen, because the OOM killer would not be able to free
710 * any memory and would livelock. freezing_slow_path will tell the freezer
711 * that TIF_MEMDIE tasks should be ignored.
714 atomic_inc(&oom_victims);
715 trace_mark_victim(tsk->pid);
719 * exit_oom_victim - note the exit of an OOM victim
721 void exit_oom_victim(void)
723 clear_thread_flag(TIF_MEMDIE);
725 if (!atomic_dec_return(&oom_victims))
726 wake_up_all(&oom_victims_wait);
730 * oom_killer_enable - enable OOM killer
732 void oom_killer_enable(void)
734 oom_killer_disabled = false;
735 pr_info("OOM killer enabled.\n");
739 * oom_killer_disable - disable OOM killer
740 * @timeout: maximum timeout to wait for oom victims in jiffies
742 * Forces all page allocations to fail rather than trigger OOM killer.
743 * Will block and wait until all OOM victims are killed or the given timeout expires.
746 * The function cannot be called when there are runnable user tasks because
747 * userspace would see unexpected allocation failures as a result. Any
748 * new usage of this function should be discussed with the MM people.
750 * Returns true if successful and false if the OOM killer cannot be disabled.
753 bool oom_killer_disable(signed long timeout)
758 * Make sure to not race with an ongoing OOM killer. Check that the
759 * current is not killed (possibly due to sharing the victim's memory).
761 if (mutex_lock_killable(&oom_lock))
763 oom_killer_disabled = true;
764 mutex_unlock(&oom_lock);
766 ret = wait_event_interruptible_timeout(oom_victims_wait,
767 !atomic_read(&oom_victims), timeout);
772 pr_info("OOM killer disabled.\n");
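/*
 * The main user of this interface is the suspend/hibernation path, which
 * quiesces the OOM killer before freezing tasks (an assumption based on the
 * kernel's power-management freezer, not stated in this file).
 */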
777 static inline bool __task_will_free_mem(struct task_struct *task)
779 struct signal_struct *sig = task->signal;
782 * A coredumping process may sleep for an extended period in exit_mm(),
783 * so the oom killer cannot assume that the process will promptly exit
784 * and release memory.
786 if (sig->flags & SIGNAL_GROUP_COREDUMP)
789 if (sig->flags & SIGNAL_GROUP_EXIT)
792 if (thread_group_empty(task) && (task->flags & PF_EXITING))
799 * Checks whether the given task is dying or exiting and likely to
800 * release its address space. This means that all threads and processes
801 * sharing the same mm have to be killed or exiting.
802 * Caller has to make sure that task->mm is stable (hold task_lock or
803 * it operates on the current).
805 static bool task_will_free_mem(struct task_struct *task)
807 struct mm_struct *mm = task->mm;
808 struct task_struct *p;
812 * Skip tasks without an mm because they might have already passed exit_mm and
813 * exit_oom_victim. The oom_reaper could have rescued that, but do not rely
814 * on that for now. We can consider using find_lock_task_mm in the future.
819 if (!__task_will_free_mem(task))
823 * This task has already been drained by the oom reaper so there is
824 * only a small chance it will free any more memory.
826 if (test_bit(MMF_OOM_SKIP, &mm->flags))
829 if (atomic_read(&mm->mm_users) <= 1)
833 * Make sure that all tasks which share the mm with the given task
834 * are dying as well to make sure that a) nobody pins its mm and
835 * b) the task is also reapable by the oom reaper.
838 for_each_process(p) {
839 if (!process_shares_mm(p, mm))
841 if (same_thread_group(task, p))
843 ret = __task_will_free_mem(p);
852 static void __oom_kill_process(struct task_struct *victim, const char *message)
854 struct task_struct *p;
855 struct mm_struct *mm;
856 bool can_oom_reap = true;
858 p = find_lock_task_mm(victim);
860 put_task_struct(victim);
862 } else if (victim != p) {
864 put_task_struct(victim);
868 /* Get a reference to safely compare mm after task_unlock(victim) */
872 /* Raise event before sending signal: task reaper must see this */
873 count_vm_event(OOM_KILL);
874 memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
877 * We should send SIGKILL before granting access to memory reserves
878 * in order to prevent the OOM victim from depleting the memory
879 * reserves from the user space under its control.
881 do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
882 mark_oom_victim(victim);
883 pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
884 message, task_pid_nr(victim), victim->comm,
885 K(victim->mm->total_vm),
886 K(get_mm_counter(victim->mm, MM_ANONPAGES)),
887 K(get_mm_counter(victim->mm, MM_FILEPAGES)),
888 K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
892 * Kill all user processes sharing victim->mm in other thread groups, if
893 * any. They don't get access to memory reserves, though, to avoid
894 * depletion of all memory. This prevents mm->mmap_sem livelock when an
895 * oom killed thread cannot exit because it requires the semaphore and
896 * it's contended by another thread trying to allocate memory itself.
897 * That thread will now get access to memory reserves since it has a
898 * pending fatal signal.
901 for_each_process(p) {
902 if (!process_shares_mm(p, mm))
904 if (same_thread_group(p, victim))
906 if (is_global_init(p)) {
907 can_oom_reap = false;
908 set_bit(MMF_OOM_SKIP, &mm->flags);
909 pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
910 task_pid_nr(victim), victim->comm,
911 task_pid_nr(p), p->comm);
915 * No use_mm() user needs to read from userspace, so we are ok to reap it.
918 if (unlikely(p->flags & PF_KTHREAD))
920 do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
925 wake_oom_reaper(victim);
928 put_task_struct(victim);
933 * Kill provided task unless it's secured by setting
934 * oom_score_adj to OOM_SCORE_ADJ_MIN.
936 static int oom_kill_memcg_member(struct task_struct *task, void *message)
938 if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
939 !is_global_init(task)) {
940 get_task_struct(task);
941 __oom_kill_process(task, message);
946 static void oom_kill_process(struct oom_control *oc, const char *message)
948 struct task_struct *victim = oc->chosen;
949 struct mem_cgroup *oom_group;
950 static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
951 DEFAULT_RATELIMIT_BURST);
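/*
 * DEFAULT_RATELIMIT_INTERVAL/BURST limit the verbose dump_header() output
 * to at most 10 reports per 5 seconds during OOM storms.
 */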
954 * If the task is already exiting, don't alarm the sysadmin or kill
955 * its children or threads, just give it access to memory reserves
956 * so it can die quickly
959 if (task_will_free_mem(victim)) {
960 mark_oom_victim(victim);
961 wake_oom_reaper(victim);
963 put_task_struct(victim);
968 if (__ratelimit(&oom_rs))
969 dump_header(oc, victim);
972 * Do we need to kill the entire memory cgroup?
973 * Or even one of the ancestor memory cgroups?
974 * Check this out before killing the victim task.
976 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
978 __oom_kill_process(victim, message);
981 * If necessary, kill all tasks in the selected memory cgroup.
984 mem_cgroup_print_oom_group(oom_group);
985 mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
987 mem_cgroup_put(oom_group);
992 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
994 static void check_panic_on_oom(struct oom_control *oc)
996 if (likely(!sysctl_panic_on_oom))
998 if (sysctl_panic_on_oom != 2) {
1000 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
1001 * does not panic for cpuset, mempolicy, or memcg allocation failures.
1004 if (oc->constraint != CONSTRAINT_NONE)
1007 /* Do not panic for oom kills triggered by sysrq */
1008 if (is_sysrq_oom(oc))
1010 dump_header(oc, NULL);
1011 panic("Out of memory: %s panic_on_oom is enabled\n",
1012 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
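/*
 * panic_on_oom == 1 therefore panics only for global (CONSTRAINT_NONE) OOMs,
 * while panic_on_oom == 2 panics even for cpuset, mempolicy and memcg
 * constrained OOMs; sysrq-triggered kills never panic.
 */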
1015 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
1017 int register_oom_notifier(struct notifier_block *nb)
1019 return blocking_notifier_chain_register(&oom_notify_list, nb);
1021 EXPORT_SYMBOL_GPL(register_oom_notifier);
1023 int unregister_oom_notifier(struct notifier_block *nb)
1025 return blocking_notifier_chain_unregister(&oom_notify_list, nb);
1027 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
1030 * out_of_memory - kill the "best" process when we run out of memory
1031 * @oc: pointer to struct oom_control
1033 * If we run out of memory, we have the choice between either
1034 * killing a random task (bad), letting the system crash (worse),
1035 * or trying to be smart about which process to kill. Note that we
1036 * don't have to be perfect here, we just have to be good.
1038 bool out_of_memory(struct oom_control *oc)
1040 unsigned long freed = 0;
1042 if (oom_killer_disabled)
1045 if (!is_memcg_oom(oc)) {
1046 blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1048 /* Got some memory back in the last second. */
1053 * If current has a pending SIGKILL or is exiting, then automatically
1054 * select it. The goal is to allow it to allocate so that it may
1055 * quickly exit and free its memory.
1057 if (task_will_free_mem(current)) {
1058 mark_oom_victim(current);
1059 wake_oom_reaper(current);
1064 * The OOM killer does not compensate for IO-less reclaim.
1065 * pagefault_out_of_memory lost its gfp context so we have to
1066 * make sure to exclude the 0 mask - all other users should have at least
1067 * ___GFP_DIRECT_RECLAIM to get here.
1069 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
1073 * Check if there were limitations on the allocation (only relevant for
1074 * NUMA and memcg) that may require different handling.
1076 oc->constraint = constrained_alloc(oc);
1077 if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
1078 oc->nodemask = NULL;
1079 check_panic_on_oom(oc);
1081 if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1082 current->mm && !oom_unkillable_task(current, oc->nodemask) &&
1083 current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1084 get_task_struct(current);
1085 oc->chosen = current;
1086 oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1090 select_bad_process(oc);
1091 /* Found nothing?!?! */
1093 dump_header(oc, NULL);
1094 pr_warn("Out of memory and no killable processes...\n");
1096 * If we got here due to an actual allocation at the
1097 * system level, we cannot survive this and will enter
1098 * an endless loop in the allocator. Bail out now.
1100 if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
1101 panic("System is deadlocked on memory\n");
1103 if (oc->chosen && oc->chosen != (void *)-1UL)
1104 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1105 "Memory cgroup out of memory");
1106 return !!oc->chosen;
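/*
 * Order of decisions above: run the oom notifiers (global OOM only), let an
 * already-dying current exit quickly, bail out for !__GFP_FS allocations,
 * classify the constraint, honour panic_on_oom, optionally kill the
 * allocating task, and finally pick and kill the worst candidate or panic
 * if nothing is killable.
 */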
1110 * The pagefault handler calls here because it is out of memory, so kill a
1111 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1112 * killing is already in progress so do nothing.
1114 void pagefault_out_of_memory(void)
1116 struct oom_control oc = {
1124 if (mem_cgroup_oom_synchronize(true))
1127 if (!mutex_trylock(&oom_lock))
1130 mutex_unlock(&oom_lock);