Merge tag 'v3.14.25' into backport/v3.14.24-ltsi-rc1+v3.14.25/snapshot-merge.wip
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm/memory-failure.c
index 4eeb0a8..a98c7fc 100644
@@ -384,15 +384,45 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
        }
 }
 
-static int task_early_kill(struct task_struct *tsk, int force_early)
+/*
+ * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
+ * on behalf of the thread group. Return the task_struct of the first
+ * such dedicated thread found, or NULL if there is none.
+ *
+ * We already hold read_lock(&tasklist_lock) in the caller, so we don't
+ * have to call rcu_read_lock/unlock() in this function.
+ */
+static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
+{
+       struct task_struct *t;
+
+       for_each_thread(tsk, t)
+               if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
+                       return t;
+       return NULL;
+}
+
+/*
+ * Determine whether a given process is an "early kill" process that
+ * expects to be signaled when some page under the process is hwpoisoned.
+ * Return the task_struct of the dedicated thread (the main thread unless
+ * one is explicitly designated) if the process is "early kill," and
+ * NULL otherwise.
+ */
+static struct task_struct *task_early_kill(struct task_struct *tsk,
+                                          int force_early)
 {
+       struct task_struct *t;
+
        if (!tsk->mm)
-               return 0;
+               return NULL;
        if (force_early)
-               return 1;
-       if (tsk->flags & PF_MCE_PROCESS)
-               return !!(tsk->flags & PF_MCE_EARLY);
-       return sysctl_memory_failure_early_kill;
+               return tsk;
+       t = find_early_kill_thread(tsk);
+       if (t)
+               return t;
+       if (sysctl_memory_failure_early_kill)
+               return tsk;
+       return NULL;
 }
 
 /*
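
For context, here is a minimal userspace sketch of the dedicated handler
thread that find_early_kill_thread() looks for: the thread marks itself with
PF_MCE_PROCESS|PF_MCE_EARLY via prctl(PR_MCE_KILL) and installs a SIGBUS
handler with SA_SIGINFO so it can inspect BUS_MCEERR_AO reports. The prctl
constants and siginfo fields are the real kernel ABI; the thread layout and
handler body are illustrative only, not part of this patch.

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <sys/prctl.h>
#include <unistd.h>

/* Handle an action-optional (BUS_MCEERR_AO) poison report. Only
 * async-signal-safe calls are allowed here, hence write() not printf(). */
static void sigbus_handler(int sig, siginfo_t *si, void *uctx)
{
	if (si->si_code == BUS_MCEERR_AO) {
		static const char msg[] = "hwpoisoned page reported\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	}
}

static void *mce_handler_thread(void *arg)
{
	struct sigaction sa = {
		.sa_sigaction = sigbus_handler,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGBUS, &sa, NULL);
	/* Sets PF_MCE_PROCESS|PF_MCE_EARLY on this thread only, making it
	 * the thread find_early_kill_thread() returns for the group. */
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
	for (;;)
		pause();
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, mce_handler_thread, NULL);
	pthread_join(tid, NULL);	/* real application work goes here */
	return 0;
}

Build with cc -pthread. With this patch applied, an AO SIGBUS for the
process is queued directly to this thread rather than to the group leader.
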
@@ -414,16 +443,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
+               struct task_struct *t = task_early_kill(tsk, force_early);
 
-               if (!task_early_kill(tsk, force_early))
+               if (!t)
                        continue;
                anon_vma_interval_tree_foreach(vmac, &av->rb_root,
                                               pgoff, pgoff) {
                        vma = vmac->vma;
                        if (!page_mapped_in_vma(page, vma))
                                continue;
-                       if (vma->vm_mm == tsk->mm)
-                               add_to_kill(tsk, page, vma, to_kill, tkc);
+                       if (vma->vm_mm == t->mm)
+                               add_to_kill(t, page, vma, to_kill, tkc);
                }
        }
        read_unlock(&tasklist_lock);
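
Note that t->mm == tsk->mm whenever t is non-NULL, since all threads in a
group share the mm, so the vm_mm comparison is unchanged in effect; passing
t to add_to_kill() only changes which thread the eventual SIGBUS targets.
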
@@ -444,10 +474,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+               struct task_struct *t = task_early_kill(tsk, force_early);
 
-               if (!task_early_kill(tsk, force_early))
+               if (!t)
                        continue;
-
                vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
                                      pgoff) {
                        /*
@@ -457,8 +487,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                         * Assume applications who requested early kill want
                         * to be informed of all such data corruptions.
                         */
-                       if (vma->vm_mm == tsk->mm)
-                               add_to_kill(tsk, page, vma, to_kill, tkc);
+                       if (vma->vm_mm == t->mm)
+                               add_to_kill(t, page, vma, to_kill, tkc);
                }
        }
        read_unlock(&tasklist_lock);
@@ -1510,7 +1540,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
 
        /* Keep page count to indicate a given hugepage is isolated. */
        list_move(&hpage->lru, &pagelist);
-       ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+       ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                                MIGRATE_SYNC, MR_MEMORY_FAILURE);
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
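
Both migrate_pages() call sites here (this one and the one in
__soft_offline_page() below) gain a NULL third argument for the new
destination-page free callback; NULL means no cleanup hook, so a failed
migration target falls back to the default putback path. A sketch of the
post-change prototype, assuming the companion mm/migrate.c change from the
same series:

/* Shown for context only: put_new_page is invoked on a destination page
 * when migration to it fails, letting callers that allocate targets from
 * a private pool reclaim them. Passing NULL keeps the old behavior. */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason);
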
@@ -1591,7 +1621,7 @@ static int __soft_offline_page(struct page *page, int flags)
                inc_zone_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
-               ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+               ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                                        MIGRATE_SYNC, MR_MEMORY_FAILURE);
                if (ret) {
                        if (!list_empty(&pagelist)) {