mm/hugetlb: handle pte markers in page faults
author Peter Xu <peterx@redhat.com>
Fri, 13 May 2022 03:22:54 +0000 (20:22 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 13 May 2022 14:20:11 +0000 (07:20 -0700)
Allow hugetlb code to handle pte markers just like none ptes.  Most of
the support is already there; we just need to make sure we don't assume
hugetlb_no_page() only handles none ptes, so when detecting a pte
change we should use pte_same() rather than pte_none().  To do the
comparison, the old_pte needs to be passed in as well.
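
For example, the recheck done under the page table lock in
hugetlb_no_page() becomes (a sketch of the change in the hunk below):

        ptl = huge_pte_lock(h, mm, ptep);
        /*
         * The old check assumed the fault always started from a none
         * pte:
         *
         *      if (!huge_pte_none(huge_ptep_get(ptep)))
         *              goto backout;
         *
         * A pte marker is not none, so compare against the pte we
         * faulted on instead:
         */
        if (!pte_same(huge_ptep_get(ptep), old_pte))
                goto backout;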

Check the original pte to see whether it's a pte marker; if it is,
recover the uffd-wp bit on the new pte to be installed, so that the
next write will be trapped by uffd.
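
Concretely, when installing the new pte (a sketch mirroring the hunk
below; "writable" stands in for the VM_WRITE && VM_SHARED check in the
real code):

        new_pte = make_huge_pte(vma, page, writable);
        /*
         * If the old pte was a uffd-wp marker, keep the new pte
         * wr-protected and carry the uffd-wp bit over, so the next
         * write faults into userfaultfd.
         */
        if (unlikely(pte_marker_uffd_wp(old_pte)))
                new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
        set_huge_pte_at(mm, haddr, ptep, new_pte);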

Link: https://lkml.kernel.org/r/20220405014909.14761-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 4b8f413..25f4ac5 100644
@@ -5438,7 +5438,8 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                        struct vm_area_struct *vma,
                        struct address_space *mapping, pgoff_t idx,
-                       unsigned long address, pte_t *ptep, unsigned int flags)
+                       unsigned long address, pte_t *ptep,
+                       pte_t old_pte, unsigned int flags)
 {
        struct hstate *h = hstate_vma(vma);
        vm_fault_t ret = VM_FAULT_SIGBUS;
@@ -5565,7 +5566,8 @@ retry:
 
        ptl = huge_pte_lock(h, mm, ptep);
        ret = 0;
-       if (!huge_pte_none(huge_ptep_get(ptep)))
+       /* If pte changed from under us, retry */
+       if (!pte_same(huge_ptep_get(ptep), old_pte))
                goto backout;
 
        if (anon_rmap) {
@@ -5575,6 +5577,12 @@ retry:
                page_dup_file_rmap(page, true);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
+       /*
+        * If this pte was previously wr-protected, keep it wr-protected even
+        * if populated.
+        */
+       if (unlikely(pte_marker_uffd_wp(old_pte)))
+               new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
        set_huge_pte_at(mm, haddr, ptep, new_pte);
 
        hugetlb_count_add(pages_per_huge_page(h), mm);
@@ -5692,8 +5700,10 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
        entry = huge_ptep_get(ptep);
-       if (huge_pte_none(entry)) {
-               ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+       /* PTE markers should be handled the same way as none pte */
+       if (huge_pte_none_mostly(entry)) {
+               ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
+                                     entry, flags);
                goto out_mutex;
        }