thp: fix copy_page_rep GPF by testing is_huge_zero_pmd once only
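
The hunks below replace the repeated is_huge_zero_pmd(orig_pmd) tests in the
write-protect fault path with tests of a single cached result: page is left
NULL when orig_pmd maps the huge zero page, and every later branch keys off
that.  The toy userspace sketch that follows (hypothetical names, none of the
kernel's locking, and not the real do_huge_pmd_wp_page()) only illustrates the
shape of the problem: if a racy condition is read more than once, the
clear-vs-copy decision and the pointer used as the copy source can disagree.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct page { char data[16]; };

    static struct page real_page = { "backing page" };

    /* stand-in for "orig_pmd maps the huge zero page"; may change under us */
    static volatile bool pmd_is_zero = true;

    static void wp_fault_sketch(void)
    {
            /* the fix: sample the racy condition exactly once ... */
            struct page *page = pmd_is_zero ? NULL : &real_page;

            /* ... and key every later branch off that single sample */
            if (!page)
                    printf("zero page: clear the new page\n");
            else
                    printf("copy from \"%s\"\n", page->data);
    }

    int main(void)
    {
            wp_fault_sketch();
            pmd_is_zero = false;    /* condition changes between faults */
            wp_fault_sketch();
            return 0;
    }
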
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 33a5dc4..95d1acb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                ret = 0;
                goto out_unlock;
        }
+
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(src_ptl);
@@ -1153,7 +1154,7 @@ alloc:
                new_page = NULL;
 
        if (unlikely(!new_page)) {
-               if (is_huge_zero_pmd(orig_pmd)) {
+               if (!page) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
                                        address, pmd, orig_pmd, haddr);
                } else {
@@ -1180,7 +1181,7 @@ alloc:
 
        count_vm_event(THP_FAULT_ALLOC);
 
-       if (is_huge_zero_pmd(orig_pmd))
+       if (!page)
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else
                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1206,7 +1207,7 @@ alloc:
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
-               if (is_huge_zero_pmd(orig_pmd)) {
+               if (!page) {
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
@@ -1243,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
                return ERR_PTR(-EFAULT);
 
+       /* Full NUMA hinting faults to serialise migration in fault paths */
+       if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+               goto out;
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
@@ -1295,6 +1300,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;
 
+       /*
+        * If there are potential migrations, wait for completion and retry
+        * without disrupting NUMA hinting information. Do not relock and
+        * check_same as the page may no longer be mapped.
+        */
+       if (unlikely(pmd_trans_migrating(*pmdp))) {
+               spin_unlock(ptl);
+               wait_migrate_huge_page(vma->anon_vma, pmdp);
+               goto out;
+       }
+
        page = pmd_page(pmd);
        BUG_ON(is_huge_zero_page(page));
        page_nid = page_to_nid(page);
@@ -1323,23 +1339,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* If the page was locked, there are no parallel migrations */
                if (page_locked)
                        goto clear_pmdnuma;
+       }
 
-               /*
-                * Otherwise wait for potential migrations and retry. We do
-                * relock and check_same as the page may no longer be mapped.
-                * As the fault is being retried, do not account for it.
-                */
+       /* Migration could have started since the pmd_trans_migrating check */
+       if (!page_locked) {
                spin_unlock(ptl);
                wait_on_page_locked(page);
                page_nid = -1;
                goto out;
        }
 
-       /* Page is misplaced, serialise migrations and parallel THP splits */
+       /*
+        * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
+        * to serialise splits
+        */
        get_page(page);
        spin_unlock(ptl);
-       if (!page_locked)
-               lock_page(page);
        anon_vma = page_lock_anon_vma_read(page);
 
        /* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1366,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_unlock;
        }
 
+       /* Bail if we fail to protect against THP splits for any reason */
+       if (unlikely(!anon_vma)) {
+               put_page(page);
+               page_nid = -1;
+               goto clear_pmdnuma;
+       }
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and pmd_numa cleared.
@@ -1517,6 +1539,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                ret = 1;
                if (!prot_numa) {
                        entry = pmdp_get_and_clear(mm, addr, pmd);
+                       if (pmd_numa(entry))
+                               entry = pmd_mknonnuma(entry);
                        entry = pmd_modify(entry, newprot);
                        ret = HPAGE_PMD_NR;
                        BUG_ON(pmd_write(entry));
@@ -1531,7 +1555,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         */
                        if (!is_huge_zero_page(page) &&
                            !pmd_numa(*pmd)) {
-                               entry = pmdp_get_and_clear(mm, addr, pmd);
+                               entry = *pmd;
                                entry = pmd_mknuma(entry);
                                ret = HPAGE_PMD_NR;
                        }
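
The do_huge_pmd_numa_page() hunks above restructure the NUMA hinting fault so
that, when a THP migration may already be in flight, the page table lock is
dropped and the whole fault is retried after waiting, rather than relocking
and revalidating in place (the page may no longer be mapped by then).  A toy
userspace sketch of that check, unlock, wait, retry shape, with hypothetical
names standing in for the page table lock, pmd_trans_migrating() and
wait_migrate_huge_page():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* page table lock stand-in */
    static bool migrating;                   /* pmd_trans_migrating() stand-in */

    /* returns true if the fault was handled, false if the caller must retry */
    static bool numa_fault_sketch(void)
    {
            pthread_mutex_lock(&ptl);
            if (migrating) {
                    /* drop the lock before waiting; the mapping may change meanwhile */
                    pthread_mutex_unlock(&ptl);
                    /* wait_migrate_huge_page() would sleep here */
                    return false;            /* retry the fault from scratch */
            }
            /* ... handle the hinting fault with the lock held ... */
            pthread_mutex_unlock(&ptl);
            return true;
    }

    int main(void)
    {
            migrating = true;
            printf("attempt 1: %s\n", numa_fault_sketch() ? "handled" : "retry");
            migrating = false;
            printf("attempt 2: %s\n", numa_fault_sketch() ? "handled" : "retry");
            return 0;
    }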