mm, hugetlb: clean-up alloc_huge_page()
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b60f330..a698d40 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -135,9 +135,9 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
  *                    across the pages in a mapping.
  *
  * The region data structures are protected by a combination of the mmap_sem
- * and the hugetlb_instantion_mutex.  To access or modify a region the caller
+ * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
  * must either hold the mmap_sem for write, or the mmap_sem for read and
- * the hugetlb_instantiation mutex:
+ * the hugetlb_instantiation_mutex:
  *
  *     down_write(&mm->mmap_sem);
  * or
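
The rule this comment states continues past the hunk boundary: a caller needs
either exclusive mmap_sem, or shared mmap_sem plus the instantiation mutex.
A minimal sketch of the two valid sequences, using the names from the comment
(illustrative only, not part of this hunk):

	/* exclusive: modify regions under mmap_sem held for write */
	down_write(&mm->mmap_sem);
	/* ... access or modify regions ... */
	up_write(&mm->mmap_sem);

	/* shared: mmap_sem for read plus the instantiation mutex */
	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);
	/* ... access or modify regions ... */
	mutex_unlock(&hugetlb_instantiation_mutex);
	up_read(&mm->mmap_sem);
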
@@ -539,10 +539,6 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        struct zoneref *z;
        unsigned int cpuset_mems_cookie;
 
-retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
-       zonelist = huge_zonelist(vma, address,
-                                       htlb_alloc_mask, &mpol, &nodemask);
        /*
         * A child process with MAP_PRIVATE mappings created by their parent
         * have no page reserves. This check ensures that reservations are
@@ -556,6 +552,11 @@ retry_cpuset:
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;
 
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
+       zonelist = huge_zonelist(vma, address,
+                                       htlb_alloc_mask, &mpol, &nodemask);
+
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
@@ -574,7 +575,6 @@ retry_cpuset:
        return page;
 
 err:
-       mpol_cond_put(mpol);
        return NULL;
 }
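
With the retry_cpuset label moved below the two early reserve checks, the err:
path can no longer be reached with a mempolicy reference held: the reference is
taken only by huge_zonelist(), which now runs after the checks, so the
mpol_cond_put() at err: is dead code and is removed. A simplified sketch of the
resulting control flow (put_mems_allowed() is the era's pairing of
get_mems_allowed(); it sits outside this hunk's context):

	/* early exits: nothing acquired yet, so no cleanup is needed */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();	/* seqcount snapshot */
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/* ... walk the zonelist, try to dequeue a page ... */
	mpol_cond_put(mpol);		/* drop the mempolicy reference */
	if (!page && !put_mems_allowed(cpuset_mems_cookie))
		goto retry_cpuset;	/* mems_allowed changed: retry */
	return page;
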
 
@@ -1166,12 +1166,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        }
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-       if (page) {
-               /* update page cgroup details */
-               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-                                            h_cg, page);
-               spin_unlock(&hugetlb_lock);
-       } else {
+       if (!page) {
                spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
@@ -1182,11 +1177,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                        return ERR_PTR(-ENOSPC);
                }
                spin_lock(&hugetlb_lock);
-               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-                                            h_cg, page);
                list_move(&page->lru, &h->hugepage_activelist);
-               spin_unlock(&hugetlb_lock);
+               /* Fall through */
        }
+       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+       spin_unlock(&hugetlb_lock);
 
        set_page_private(page, (unsigned long)spool);
 
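Both branches of alloc_huge_page() previously committed the cgroup charge and
dropped hugetlb_lock; with the buddy-allocation path re-taking the lock and
falling through, the common tail now appears exactly once. The invariant at the
merge point is that either path holds hugetlb_lock and has a valid page. A
simplified sketch of the resulting shape (error unwinding elided):

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page)
			return ERR_PTR(-ENOSPC);  /* after uncharging */
		spin_lock(&hugetlb_lock);  /* re-take for the common tail */
		list_move(&page->lru, &h->hugepage_activelist);
	}
	/* both paths arrive here with hugetlb_lock held and page valid */
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);
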
@@ -1526,7 +1521,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
        struct hstate *h;
        NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
-       err = strict_strtoul(buf, 10, &count);
+       err = kstrtoul(buf, 10, &count);
        if (err)
                goto out;
 
@@ -1617,7 +1612,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
        if (h->order >= MAX_ORDER)
                return -EINVAL;
 
-       err = strict_strtoul(buf, 10, &input);
+       err = kstrtoul(buf, 10, &input);
        if (err)
                return err;
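
The last two hunks convert strict_strtoul() to kstrtoul(). strict_strtoul() is
the older helper, deprecated in favor of the kstrto*() family; the call shape
is identical here. For reference, the usage pattern as it appears in these
sysfs handlers:

	unsigned long count;
	int err;

	/*
	 * kstrtoul() returns 0 on success, -EINVAL on malformed input and
	 * -ERANGE on overflow; it writes the result only on success, and
	 * tolerates a single trailing newline, which suits sysfs buffers.
	 */
	err = kstrtoul(buf, 10, &count);	/* base 10 */
	if (err)
		return err;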