mm/hugetlb: handle UFFDIO_WRITEPROTECT
Author: Peter Xu <peterx@redhat.com>
Fri, 13 May 2022 03:22:54 +0000 (20:22 -0700)
Committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 13 May 2022 14:20:11 +0000 (07:20 -0700)
This starts from passing cp_flags into hugetlb_change_protection() so
hugetlb will be able to handle MM_CP_UFFD_WP[_RESOLVE] requests.

huge_pte_clear_uffd_wp() is introduced to handle the case where the
UFFDIO_WRITEPROTECT is requested upon migrating huge page entries.

Link: https://lkml.kernel.org/r/20220405014906.14708-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/mprotect.c
mm/userfaultfd.c

index 159b253..f1143f1 100644 (file)
@@ -211,7 +211,8 @@ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-               unsigned long address, unsigned long end, pgprot_t newprot);
+               unsigned long address, unsigned long end, pgprot_t newprot,
+               unsigned long cp_flags);
 
 bool is_hugetlb_entry_migration(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
@@ -397,7 +398,8 @@ static inline void move_hugetlb_state(struct page *oldpage,
 
 static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
-                       unsigned long end, pgprot_t newprot)
+                       unsigned long end, pgprot_t newprot,
+                       unsigned long cp_flags)
 {
        return 0;
 }
index 2550b43..4b8f413 100644 (file)
@@ -6233,7 +6233,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-               unsigned long address, unsigned long end, pgprot_t newprot)
+               unsigned long address, unsigned long end,
+               pgprot_t newprot, unsigned long cp_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;
@@ -6243,6 +6244,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        unsigned long pages = 0;
        bool shared_pmd = false;
        struct mmu_notifier_range range;
+       bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+       bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 
        /*
         * In the case of shared PMDs, the area to flush could be beyond
@@ -6289,6 +6292,10 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                                        entry = make_readable_migration_entry(
                                                                swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
+                               if (uffd_wp)
+                                       newpte = pte_swp_mkuffd_wp(newpte);
+                               else if (uffd_wp_resolve)
+                                       newpte = pte_swp_clear_uffd_wp(newpte);
                                set_huge_swap_pte_at(mm, address, ptep,
                                                     newpte, huge_page_size(h));
                                pages++;
@@ -6303,6 +6310,10 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
                        pte = huge_pte_modify(old_pte, newprot);
                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
+                       if (uffd_wp)
+                               pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
+                       else if (uffd_wp_resolve)
+                               pte = huge_pte_clear_uffd_wp(pte);
                        huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
                        pages++;
                }
index cccad79..ba55926 100644 (file)
@@ -460,7 +460,8 @@ unsigned long change_protection(struct mmu_gather *tlb,
        BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
 
        if (is_vm_hugetlb_page(vma))
-               pages = hugetlb_change_protection(vma, start, end, newprot);
+               pages = hugetlb_change_protection(vma, start, end, newprot,
+                                                 cp_flags);
        else
                pages = change_protection_range(tlb, vma, start, end, newprot,
                                                cp_flags);
index be2a61f..01edc18 100644 (file)
@@ -705,6 +705,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                        atomic_t *mmap_changing)
 {
        struct vm_area_struct *dst_vma;
+       unsigned long page_mask;
        struct mmu_gather tlb;
        pgprot_t newprot;
        int err;
@@ -742,6 +743,13 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
        if (!vma_is_anonymous(dst_vma))
                goto out_unlock;
 
+       if (is_vm_hugetlb_page(dst_vma)) {
+               err = -EINVAL;
+               page_mask = vma_kernel_pagesize(dst_vma) - 1;
+               if ((start & page_mask) || (len & page_mask))
+                       goto out_unlock;
+       }
+
        if (enable_wp)
                newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
        else