mm, hugetlb: return a reserved page to a reserved pool if failed
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 08b7595..0631556 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
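
A huge page taken from the reserve pool could be lost if the operation
that consumed it failed afterwards: a COW copy that cannot be committed,
or a fault that loses a race and frees the page again. This patch tags a
page that consumed a reserve with PagePrivate when it is dequeued, lets
free_huge_page() return the reservation while the tag is still set, and
clears the tag at the points where the page becomes properly accounted:
insertion into the page cache, anonymous rmap setup, and a committed COW.
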
@@ -434,25 +434,6 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
        return (get_vma_private_data(vma) & flag) != 0;
 }
 
-/* Decrement the reserved pages in the hugepage pool by one */
-static void decrement_hugepage_resv_vma(struct hstate *h,
-                       struct vm_area_struct *vma)
-{
-       if (vma->vm_flags & VM_NORESERVE)
-               return;
-
-       if (vma->vm_flags & VM_MAYSHARE) {
-               /* Shared mappings always use reserves */
-               h->resv_huge_pages--;
-       } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-               /*
-                * Only the process that called mmap() has reserves for
-                * private mappings.
-                */
-               h->resv_huge_pages--;
-       }
-}
-
 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
@@ -462,12 +443,35 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 {
+       if (vma->vm_flags & VM_NORESERVE) {
+               /*
+                * This address is already reserved by another process
+                * (chg == 0), so we should decrement the reserved count.
+                * Without the decrement, the reserve count would linger
+                * after the inode is released, because the allocated page
+                * goes into the page cache and is regarded as coming
+                * from the reserved pool in the release step. We have no
+                * better way to handle this for now, so add a work-around.
+                */
+               if (vma->vm_flags & VM_MAYSHARE && chg == 0)
+                       return 1;
+               else
+                       return 0;
+       }
+
+       /* Shared mappings always use reserves */
        if (vma->vm_flags & VM_MAYSHARE)
                return 1;
+
+       /*
+        * Only the process that called mmap() has reserves for
+        * private mappings.
+        */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return 1;
+
        return 0;
 }
 
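The rewritten check threads chg through to the reserve decision, where
chg is the value the caller obtains from vma_needs_reservation(): 0 means
the range is already covered by an existing reservation. A minimal
userspace model of the new logic (the simplified flag values standing in
for the kernel's are purely an assumption for illustration):

    #include <stdbool.h>

    #define VM_NORESERVE 0x1
    #define VM_MAYSHARE  0x2
    #define RESV_OWNER   0x4        /* stands in for HPAGE_RESV_OWNER */

    static bool model_vma_has_reserves(unsigned long flags, long chg)
    {
            if (flags & VM_NORESERVE)
                    /* A MAP_NORESERVE range on a shared mapping that
                     * another process already reserved (chg == 0) still
                     * consumes that reserve; everything else does not. */
                    return (flags & VM_MAYSHARE) && chg == 0;
            if (flags & VM_MAYSHARE)
                    return true;    /* shared mappings always use reserves */
            return flags & RESV_OWNER;  /* private: only the mmap() caller */
    }
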
@@ -529,7 +533,8 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
-                               unsigned long address, int avoid_reserve)
+                               unsigned long address, int avoid_reserve,
+                               long chg)
 {
        struct page *page = NULL;
        struct mempolicy *mpol;
@@ -544,7 +549,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
         * have no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed
         */
-       if (!vma_has_reserves(vma) &&
+       if (!vma_has_reserves(vma, chg) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;
 
@@ -562,8 +567,13 @@ retry_cpuset:
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
                        page = dequeue_huge_page_node(h, zone_to_nid(zone));
                        if (page) {
-                               if (!avoid_reserve)
-                                       decrement_hugepage_resv_vma(h, vma);
+                               if (avoid_reserve)
+                                       break;
+                               if (!vma_has_reserves(vma, chg))
+                                       break;
+
+                               SetPagePrivate(page);
+                               h->resv_huge_pages--;
                                break;
                        }
                }
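
In place of the removed decrement_hugepage_resv_vma(), the dequeue path
now decides inline: an avoid_reserve caller takes the page without
touching reserve accounting, a VMA without reserves takes it as a plain
free page, and only a genuine reserve consumer is tagged with
SetPagePrivate() before resv_huge_pages is decremented. The tag is what
later tells free_huge_page() that this page owes a reservation back. A
toy sketch of the consume side (toy counters and flag, not the kernel's
hstate or page flags):

    struct toy_pool  { long free_pages, resv_pages; };
    struct toy_hpage { int resv_consumed; };    /* models PagePrivate */

    static void toy_consume(struct toy_pool *p, struct toy_hpage *pg,
                            int avoid_reserve, int has_reserves)
    {
            p->free_pages--;            /* page leaves the free list */
            if (avoid_reserve || !has_reserves)
                    return;             /* plain free page, no tag */
            pg->resv_consumed = 1;      /* SetPagePrivate(page) */
            p->resv_pages--;            /* h->resv_huge_pages-- */
    }
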
@@ -620,15 +630,20 @@ static void free_huge_page(struct page *page)
        int nid = page_to_nid(page);
        struct hugepage_subpool *spool =
                (struct hugepage_subpool *)page_private(page);
+       bool restore_reserve;
 
        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
+       restore_reserve = PagePrivate(page);
 
        spin_lock(&hugetlb_lock);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
+       if (restore_reserve)
+               h->resv_huge_pages++;
+
        if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
                /* remove the page from active list */
                list_del(&page->lru);
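
free_huge_page() samples the tag before the page's private state is
reset, then returns the reservation under hugetlb_lock. The restore side
of the same toy model (types repeated so the sketch stands alone):

    struct toy_pool  { long free_pages, resv_pages; };
    struct toy_hpage { int resv_consumed; };    /* models PagePrivate */

    static void toy_release(struct toy_pool *p, struct toy_hpage *pg)
    {
            int restore = pg->resv_consumed;    /* PagePrivate(page) */

            pg->resv_consumed = 0;      /* private state is cleared */
            p->free_pages++;            /* back on the free list */
            if (restore)
                    p->resv_pages++;    /* h->resv_huge_pages++ */
    }
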
@@ -946,10 +961,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
  */
 struct page *alloc_huge_page_node(struct hstate *h, int nid)
 {
-       struct page *page;
+       struct page *page = NULL;
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page_node(h, nid);
+       if (h->free_huge_pages - h->resv_huge_pages > 0)
+               page = dequeue_huge_page_node(h, nid);
        spin_unlock(&hugetlb_lock);
 
        if (!page)
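
alloc_huge_page_node() used to dequeue unconditionally, which could hand
out a free page that was in fact spoken for by a reservation. The new
guard dequeues only while at least one unreserved free page exists; with
free_huge_pages == 2 and resv_huge_pages == 2, for instance, the dequeue
is skipped and the caller falls back to alloc_buddy_huge_page(). The
guard in isolation (plain longs, an illustrative assumption):

    /* True only if dequeuing cannot steal a reserved page. */
    static int can_dequeue(long free_pages, long resv_pages)
    {
            return free_pages - resv_pages > 0;
    }
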
@@ -1037,11 +1053,8 @@ free:
        spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
-       if (!list_empty(&surplus_list)) {
-               list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
-                       put_page(page);
-               }
-       }
+       list_for_each_entry_safe(page, tmp, &surplus_list, lru)
+               put_page(page);
        spin_lock(&hugetlb_lock);
 
        return ret;
@@ -1108,9 +1121,9 @@ static long vma_needs_reservation(struct hstate *h,
        } else  {
                long err;
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-               struct resv_map *reservations = vma_resv_map(vma);
+               struct resv_map *resv = vma_resv_map(vma);
 
-               err = region_chg(&reservations->regions, idx, idx + 1);
+               err = region_chg(&resv->regions, idx, idx + 1);
                if (err < 0)
                        return err;
                return 0;
@@ -1128,10 +1141,10 @@ static void vma_commit_reservation(struct hstate *h,
 
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-               struct resv_map *reservations = vma_resv_map(vma);
+               struct resv_map *resv = vma_resv_map(vma);
 
                /* Mark this page used in the map. */
-               region_add(&reservations->regions, idx, idx + 1);
+               region_add(&resv->regions, idx, idx + 1);
        }
 }
 
@@ -1157,17 +1170,18 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(-ENOMEM);
-       if (chg)
-               if (hugepage_subpool_get_pages(spool, chg))
+       if (chg || avoid_reserve)
+               if (hugepage_subpool_get_pages(spool, 1))
                        return ERR_PTR(-ENOSPC);
 
        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
        if (ret) {
-               hugepage_subpool_put_pages(spool, chg);
+               if (chg || avoid_reserve)
+                       hugepage_subpool_put_pages(spool, 1);
                return ERR_PTR(-ENOSPC);
        }
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
+       page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
@@ -1175,7 +1189,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                        hugetlb_cgroup_uncharge_cgroup(idx,
                                                       pages_per_huge_page(h),
                                                       h_cg);
-                       hugepage_subpool_put_pages(spool, chg);
+                       if (chg || avoid_reserve)
+                               hugepage_subpool_put_pages(spool, 1);
                        return ERR_PTR(-ENOSPC);
                }
                spin_lock(&hugetlb_lock);
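
The subpool accounting switches from charging chg pages to charging
exactly one page whenever the range is not already covered (chg != 0) or
the caller bypasses reserves (avoid_reserve), and both failure paths
mirror that charge with a single put under the same condition. A compact
sketch of the take/unwind symmetry, with toy stubs standing in for the
subpool and cgroup calls (assumptions, not the kernel API):

    static int  subpool_get(long n) { (void)n; return 0; } /* 0 = success */
    static void subpool_put(long n) { (void)n; }
    static int  cgroup_charge(void) { return -1; } /* force the error path */

    static int toy_alloc(long chg, int avoid_reserve)
    {
            int took_subpool = 0;

            if (chg || avoid_reserve) {
                    if (subpool_get(1))
                            return -1;
                    took_subpool = 1;
            }
            if (cgroup_charge()) {
                    if (took_subpool)   /* unwind mirrors the take */
                            subpool_put(1);
                    return -1;
            }
            return 0;
    }
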
@@ -2181,7 +2196,7 @@ out:
 
 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 {
-       struct resv_map *reservations = vma_resv_map(vma);
+       struct resv_map *resv = vma_resv_map(vma);
 
        /*
         * This new VMA should share its siblings reservation map if present.
@@ -2191,34 +2206,34 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
         * after this open call completes.  It is therefore safe to take a
         * new reference here without additional locking.
         */
-       if (reservations)
-               kref_get(&reservations->refs);
+       if (resv)
+               kref_get(&resv->refs);
 }
 
 static void resv_map_put(struct vm_area_struct *vma)
 {
-       struct resv_map *reservations = vma_resv_map(vma);
+       struct resv_map *resv = vma_resv_map(vma);
 
-       if (!reservations)
+       if (!resv)
                return;
-       kref_put(&reservations->refs, resv_map_release);
+       kref_put(&resv->refs, resv_map_release);
 }
 
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
        struct hstate *h = hstate_vma(vma);
-       struct resv_map *reservations = vma_resv_map(vma);
+       struct resv_map *resv = vma_resv_map(vma);
        struct hugepage_subpool *spool = subpool_vma(vma);
        unsigned long reserve;
        unsigned long start;
        unsigned long end;
 
-       if (reservations) {
+       if (resv) {
                start = vma_hugecache_offset(h, vma, vma->vm_start);
                end = vma_hugecache_offset(h, vma, vma->vm_end);
 
                reserve = (end - start) -
-                       region_count(&reservations->regions, start, end);
+                       region_count(&resv->regions, start, end);
 
                resv_map_put(vma);
 
@@ -2531,7 +2546,6 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct hstate *h = hstate_vma(vma);
        struct page *old_page, *new_page;
-       int avoidcopy;
        int outside_reserve = 0;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
@@ -2541,10 +2555,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 retry_avoidcopy:
        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
-       avoidcopy = (page_mapcount(old_page) == 1);
-       if (avoidcopy) {
-               if (PageAnon(old_page))
-                       page_move_anon_rmap(old_page, vma, address);
+       if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
+               page_move_anon_rmap(old_page, vma, address);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
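
Folding the two tests also narrows the optimization: a page still in the
page cache can be reached through its file mapping regardless of its map
count, so only an anonymous page with a single mapping may safely be
made writable in place.
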
@@ -2558,8 +2570,7 @@ retry_avoidcopy:
         * at the time of fork() could consume its reserves on COW instead
         * of the full address range.
         */
-       if (!(vma->vm_flags & VM_MAYSHARE) &&
-                       is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
                        old_page != pagecache_page)
                outside_reserve = 1;
 
@@ -2631,6 +2642,8 @@ retry_avoidcopy:
        spin_lock(&mm->page_table_lock);
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+               ClearPagePrivate(new_page);
+
                /* Break COW */
                huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
@@ -2642,10 +2655,11 @@ retry_avoidcopy:
        }
        spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-       /* Caller expects lock to be held */
-       spin_lock(&mm->page_table_lock);
        page_cache_release(new_page);
        page_cache_release(old_page);
+
+       /* Caller expects lock to be held */
+       spin_lock(&mm->page_table_lock);
        return 0;
 }
 
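When the COW commits, two details matter: ClearPagePrivate(new_page)
drops the reserve tag, since the page is now mapped and must not credit
the reserve pool again when it is eventually freed; and the two
page_cache_release() calls are moved out from under page_table_lock,
which is re-taken only at the end because the caller expects it held.
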
@@ -2741,6 +2755,7 @@ retry:
                                        goto retry;
                                goto out;
                        }
+                       ClearPagePrivate(page);
 
                        spin_lock(&inode->i_lock);
                        inode->i_blocks += blocks_per_huge_page(h);
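
Similarly, once the page has been added to the page cache it is owned by
the file, and freeing it later goes through the file's own reservation
bookkeeping, so the per-page reserve tag is cleared here as well;
restore-on-free is only for pages whose mapping never completed.
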
@@ -2787,8 +2802,10 @@ retry:
        if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;
 
-       if (anon_rmap)
+       if (anon_rmap) {
+               ClearPagePrivate(page);
                hugepage_add_new_anon_rmap(page, vma, address);
+       }
        else
                page_dup_rmap(page);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)