hugetlbfs: close race between MADV_DONTNEED and page fault
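
This patch splits the old __unmap_hugepage_range_final() into two entry
points, __hugetlb_zap_begin() and __hugetlb_zap_end(), and makes
__unmap_hugepage_range() non-static, so the caller can take the hugetlb
VMA lock and i_mmap_rwsem before zapping the range and drop them only
after the zap completes.  With the locks held across the whole
operation, a concurrent page fault can no longer slip in halfway
through the zap.  As a rough sketch (not part of this diff), callers
outside mm/hugetlb.c would presumably go through a pair of wrappers
along these lines; the names hugetlb_zap_begin()/hugetlb_zap_end() and
their placement in include/linux/hugetlb.h are assumptions based on the
functions introduced below:

/*
 * Sketch only: wrappers for the new entry points.  Non-hugetlb VMAs
 * skip the extra locking entirely.
 */
static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
                                     unsigned long *start, unsigned long *end)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
                                   struct zap_details *details)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_end(vma, details);
}
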
[platform/kernel/linux-starfive.git]

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d218ee2..1301ba7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5306,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
        return len + old_addr - old_end;
 }
 
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+                           unsigned long start, unsigned long end,
+                           struct page *ref_page, zap_flags_t zap_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -5437,16 +5437,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                tlb_flush_mmu_tlbonly(tlb);
 }
 
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
-                         struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end, struct page *ref_page,
-                         zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+                        unsigned long *start, unsigned long *end)
 {
+       if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
+               return;
+
+       adjust_range_if_pmd_sharing_possible(vma, start, end);
        hugetlb_vma_lock_write(vma);
-       i_mmap_lock_write(vma->vm_file->f_mapping);
+       if (vma->vm_file)
+               i_mmap_lock_write(vma->vm_file->f_mapping);
+}
 
-       /* mmu notification performed in caller */
-       __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+                      struct zap_details *details)
+{
+       zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+       if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
+               return;
 
        if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
                /*
@@ -5459,11 +5468,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                 * someone else.
                 */
                __hugetlb_vma_unlock_write_free(vma);
-               i_mmap_unlock_write(vma->vm_file->f_mapping);
        } else {
-               i_mmap_unlock_write(vma->vm_file->f_mapping);
                hugetlb_vma_unlock_write(vma);
        }
+
+       if (vma->vm_file)
+               i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
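
For context, a minimal caller-side sketch of how the new hooks bracket
a zap.  This is illustrative only: the real caller changes live in
mm/memory.c and are not part of this hunk, the helper name below is
made up, and unmap_single_vma()'s exact signature is assumed here.  The
point is that the locks taken in __hugetlb_zap_begin() stay held across
the entire zap and are released only in __hugetlb_zap_end():

/* Hypothetical helper, for illustration only. */
static void example_zap_one_vma(struct mmu_gather *tlb,
                                struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                struct zap_details *details)
{
        /*
         * Takes the hugetlb VMA lock and i_mmap_rwsem; may also widen
         * start/end when PMD sharing is possible.
         */
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_begin(vma, &start, &end);

        /* The zap itself runs with those locks still held. */
        unmap_single_vma(tlb, vma, start, end, details, false);

        /*
         * Drops the VMA lock (or frees it on final unmap) and
         * i_mmap_rwsem only after the zap has finished.
         */
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_end(vma, details);
}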