mm/page_alloc: prevent MIGRATE_RESERVE pages from being misplaced
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9c6288a..e8fff0f 100644
@@ -1060,7 +1060,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
        if (!list_empty(&pagelist)) {
-               err = migrate_pages(&pagelist, new_node_page, dest,
+               err = migrate_pages(&pagelist, new_node_page, NULL, dest,
                                        MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
@@ -1306,7 +1306,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-                       nr_failed = migrate_pages(&pagelist, new_page,
+                       nr_failed = migrate_pages(&pagelist, new_page, NULL,
                                start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
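
Both hunks above track the same migrate_pages() API change: the allocation callback is now paired with an optional free callback for releasing pre-allocated destination pages when a page cannot be migrated, and callers with no special free path pass NULL. A sketch of the reworked prototype as these call sites imply it (the free_page_t typedef follows include/linux/migrate.h in this series; treat the exact spelling as assumed):

    /* Assumed shape of the reworked entry point: put_new_page lets a
     * caller reclaim destination pages from failed migrations itself;
     * NULL keeps the default behaviour of simply freeing them. */
    typedef void free_page_t(struct page *page, unsigned long private);

    int migrate_pages(struct list_head *from, new_page_t get_new_page,
                      free_page_t put_new_page, unsigned long private,
                      enum migrate_mode mode, int reason);

The mempolicy callers allocate plain pages in new_node_page()/new_page(), so the default free path suffices; pages that fail to migrate are still handed back via putback_movable_pages() as before.
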
@@ -1897,7 +1897,7 @@ int node_random(const nodemask_t *maskp)
  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
  * @nodemask for filtering the zonelist.
  *
- * Must be protected by get_mems_allowed()
+ * Must be protected by read_mems_allowed_begin()
  */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                                gfp_t gfp_flags, struct mempolicy **mpol,
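
The comment fix above mirrors a rename in the cpuset API: get_mems_allowed()/put_mems_allowed() became read_mems_allowed_begin()/read_mems_allowed_retry(), which makes explicit that the pair delimits a seqcount read section over current->mems_allowed rather than taking any lock. A minimal sketch of the renamed helpers, assuming the per-task mems_allowed_seq seqcount this series relies on:

    /* include/linux/cpuset.h -- assumed shape of the read-side helpers.
     * begin() samples the sequence counter; retry() reports whether a
     * concurrent cpuset rebind changed mems_allowed while the caller
     * was working, in which case the caller must loop and redo it. */
    static inline unsigned int read_mems_allowed_begin(void)
    {
            return read_seqcount_begin(&current->mems_allowed_seq);
    }

    static inline bool read_mems_allowed_retry(unsigned int seq)
    {
            return read_seqcount_retry(&current->mems_allowed_seq, seq);
    }
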
@@ -2061,7 +2061,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 retry_cpuset:
        pol = get_vma_policy(current, vma, addr);
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
 
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
@@ -2069,7 +2069,7 @@ retry_cpuset:
                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
-               if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                        goto retry_cpuset;
 
                return page;
@@ -2079,7 +2079,7 @@ retry_cpuset:
                                      policy_nodemask(gfp, pol));
        if (unlikely(mpol_needs_cond_ref(pol)))
                __mpol_put(pol);
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;
 }
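
Note the reordered retry test in both hunks above: the old "!put_mems_allowed(cookie) && !page" consulted the cookie unconditionally, whereas "!page && read_mems_allowed_retry(cookie)" short-circuits, so the seqcount is re-read only when the allocation actually failed; an allocation that succeeded against a momentarily stale nodemask is acceptable and now costs nothing extra. alloc_pages_current() below adopts the same shape. Restated as a loop (illustrative only; the code keeps its label-and-goto form, and gfp, order, zonelist and nodemask are assumed in scope):

    /* Illustrative restatement of the retry_cpuset pattern. */
    struct page *page;
    unsigned int cookie;

    do {
            cookie = read_mems_allowed_begin();
            page = __alloc_pages_nodemask(gfp, order, zonelist, nodemask);
            /* Only a failed allocation needs to care whether mems_allowed
             * changed underneath us; a success keeps the page it got. */
    } while (!page && read_mems_allowed_retry(cookie));
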
@@ -2113,7 +2113,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                pol = &default_policy;
 
 retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
 
        /*
         * No reference counting needed for current->mempolicy
@@ -2126,7 +2126,7 @@ retry_cpuset:
                                policy_zonelist(gfp, pol, numa_node_id()),
                                policy_nodemask(gfp, pol));
 
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
 
        return page;
@@ -2170,7 +2170,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
        } else
                *new = *old;
 
-       rcu_read_lock();
        if (current_cpuset_is_being_rebound()) {
                nodemask_t mems = cpuset_mems_allowed(current);
                if (new->flags & MPOL_F_REBINDING)
@@ -2178,7 +2177,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
                else
                        mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
        }
-       rcu_read_unlock();
        atomic_set(&new->refcnt, 1);
        return new;
 }
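
The dropped rcu_read_lock()/rcu_read_unlock() pair fixes a sleeping-while-atomic hazard: cpuset_mems_allowed() can block (it acquires a mutex in this era), which is not permitted inside an RCU read-side critical section. The RCU protection the cpuset lookup genuinely needs is assumed to move into current_cpuset_is_being_rebound() itself, roughly:

    /* kernel/cpuset.c -- assumed companion change: take the RCU read
     * lock only around the task_cs() dereference that needs it, so
     * __mpol_dup() may call sleeping functions freely. */
    int current_cpuset_is_being_rebound(void)
    {
            int ret;

            rcu_read_lock();
            ret = task_cs(current) == cpuset_being_rebound;
            rcu_read_unlock();

            return ret;
    }
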