mm/gup: remove FOLL_MIGRATION
author David Hildenbrand <david@redhat.com>
Fri, 21 Oct 2022 10:11:41 +0000 (12:11 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 12 Dec 2022 02:12:09 +0000 (18:12 -0800)
Fortunately, the last user (KSM) is gone, so let's just remove this rather
special code from generic GUP handling -- especially because KSM never
required the PMD handling as KSM only deals with individual base pages.

Link: https://lkml.kernel.org/r/20221021101141.84170-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/gup.c

index 8df5cae..767c8c5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3057,7 +3057,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                                 * and return without waiting upon it */
 #define FOLL_NOFAULT   0x80    /* do not fault in pages */
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
-#define FOLL_MIGRATION 0x400   /* wait for page to replace migration entry */
 #define FOLL_TRIED     0x800   /* a retry, previous pass started an IO */
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_ANON      0x8000  /* don't do file mappings */
index 2860cf4..82b275b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -537,30 +537,13 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);
-retry:
        if (unlikely(pmd_bad(*pmd)))
                return no_page_table(vma, flags);
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
-       if (!pte_present(pte)) {
-               swp_entry_t entry;
-               /*
-                * KSM's break_ksm() relies upon recognizing a ksm page
-                * even while it is being migrated, so for that case we
-                * need migration_entry_wait().
-                */
-               if (likely(!(flags & FOLL_MIGRATION)))
-                       goto no_page;
-               if (pte_none(pte))
-                       goto no_page;
-               entry = pte_to_swp_entry(pte);
-               if (!is_migration_entry(entry))
-                       goto no_page;
-               pte_unmap_unlock(ptep, ptl);
-               migration_entry_wait(mm, pmd, address);
-               goto retry;
-       }
+       if (!pte_present(pte))
+               goto no_page;
        if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
                goto no_page;
 
@@ -668,28 +651,8 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
        pmdval = READ_ONCE(*pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
-retry:
-       if (!pmd_present(pmdval)) {
-               /*
-                * Should never reach here, if thp migration is not supported;
-                * Otherwise, it must be a thp migration entry.
-                */
-               VM_BUG_ON(!thp_migration_supported() ||
-                                 !is_pmd_migration_entry(pmdval));
-
-               if (likely(!(flags & FOLL_MIGRATION)))
-                       return no_page_table(vma, flags);
-
-               pmd_migration_entry_wait(mm, pmd);
-               pmdval = READ_ONCE(*pmd);
-               /*
-                * MADV_DONTNEED may convert the pmd to null because
-                * mmap_lock is held in read mode
-                */
-               if (pmd_none(pmdval))
-                       return no_page_table(vma, flags);
-               goto retry;
-       }
+       if (!pmd_present(pmdval))
+               return no_page_table(vma, flags);
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
@@ -703,18 +666,10 @@ retry:
        if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
                return no_page_table(vma, flags);
 
-retry_locked:
        ptl = pmd_lock(mm, pmd);
-       if (unlikely(pmd_none(*pmd))) {
-               spin_unlock(ptl);
-               return no_page_table(vma, flags);
-       }
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
-               if (likely(!(flags & FOLL_MIGRATION)))
-                       return no_page_table(vma, flags);
-               pmd_migration_entry_wait(mm, pmd);
-               goto retry_locked;
+               return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
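
For context only, not part of the patch: with FOLL_MIGRATION removed, a GUP
caller that used to pass it has to wait for page migration on its own.  Below
is a minimal caller-side sketch, assuming a break_ksm()-style loop that holds
mmap_lock in read mode and simply faults and retries when follow_page() finds
no present PTE; the helper name wait_and_get_page() and the fault flags are
illustrative and do not appear in this series.

/*
 * Illustrative sketch (not from this patch).  Without FOLL_MIGRATION,
 * follow_page() returns NULL when the PTE holds a migration entry; the
 * caller faults instead, and do_swap_page() waits on the migration entry
 * via migration_entry_wait() before the next retry can succeed.
 * The caller is assumed to hold mmap_lock in read mode.
 */
static int wait_and_get_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	vm_fault_t ret;

	while (true) {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!IS_ERR_OR_NULL(page)) {
			put_page(page);
			return 0;
		}
		/* Not present (e.g. under migration): fault it back in. */
		ret = handle_mm_fault(vma, addr, 0, NULL);
		if (ret & VM_FAULT_ERROR)
			return -EFAULT;
	}
}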