diff --git a/mm/hmm.c b/mm/hmm.c
index 842e265..3af995c 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -212,14 +212,6 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline bool hmm_is_device_private_entry(struct hmm_range *range,
-               swp_entry_t entry)
-{
-       return is_device_private_entry(entry) &&
-               pfn_swap_entry_to_page(entry)->pgmap->owner ==
-               range->dev_private_owner;
-}
-
 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
 {
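
Note on the hunk above: the deleted helper folded two questions into one
predicate, "is this entry device private?" and "does it belong to the
caller?". The following hunks need them answered separately, because a
device private entry that is not owned by the caller must now take the
fault path instead of falling through to the -EFAULT case at the end of
the swap-entry handling. The resulting decision, as a paraphrase rather
than code from the patch:

    /*
     * Device private swap entry seen by hmm_vma_handle_pte():
     *
     *   pgmap->owner == range->dev_private_owner:
     *           report HMM_PFN_VALID (plus HMM_PFN_WRITE if the entry
     *           is writable) without faulting; the caller understands
     *           its own device PFNs.
     *
     *   pgmap->owner != range->dev_private_owner:
     *           goto fault, so the owning driver's migrate_to_ram()
     *           brings the data back to system memory that any caller
     *           can use.
     */
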
@@ -252,10 +244,12 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                swp_entry_t entry = pte_to_swp_entry(pte);
 
                /*
-                * Never fault in device private pages, but just report
-                * the PFN even if not present.
+                * Don't fault in device private pages owned by the caller,
+                * just report the PFN.
                 */
-               if (hmm_is_device_private_entry(range, entry)) {
+               if (is_device_private_entry(entry) &&
+                   pfn_swap_entry_to_page(entry)->pgmap->owner ==
+                   range->dev_private_owner) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
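
For entries the caller does own, the PFN is reported with the flags set
above and no fault is taken; only the owning driver can make sense of a
device private PFN. A sketch of how such a caller might consume the
output array afterwards (hmm_pfn_to_page() and is_device_private_page()
are the real helpers; the function and loop are illustrative):

    static void example_consume(unsigned long *pfns, unsigned long npages)
    {
        unsigned long i;

        for (i = 0; i < npages; i++) {
            struct page *page;

            if (!(pfns[i] & HMM_PFN_VALID))
                continue;       /* hole, or faulting was not requested */
            page = hmm_pfn_to_page(pfns[i]);
            if (is_device_private_page(page)) {
                /* one of ours: translate to a device-local address */
            } else {
                /* ordinary system memory page */
            }
            if (pfns[i] & HMM_PFN_WRITE) {
                /* safe to map writable on the device */
            }
        }
    }
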
@@ -273,6 +267,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                if (!non_swap_entry(entry))
                        goto fault;
 
+               if (is_device_private_entry(entry))
+                       goto fault;
+
                if (is_device_exclusive_entry(entry))
                        goto fault;
 
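
The new goto is the point of the fix: a device private entry owned by
somebody else now takes the fault path, which leads through
do_swap_page() to the migrate_to_ram() method of the pgmap that does
own the page. The ownership cookie on both sides is an opaque pointer.
Roughly how a driver wires this up (names are illustrative; the
dev_pagemap fields and ops are the real API, and lib/test_hmm.c is a
complete in-tree example):

    static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
    {
        /*
         * A real driver copies the device page's data into a newly
         * allocated system page and replaces the device private swap
         * entry, typically via the migrate_vma_*() helpers.
         */
        return 0;
    }

    static void example_page_free(struct page *page)
    {
        /* drop the driver's bookkeeping for the freed device page */
    }

    static const struct dev_pagemap_ops example_pgmap_ops = {
        .page_free      = example_page_free,
        .migrate_to_ram = example_migrate_to_ram,
    };

    static int example_owner; /* any unique address works as the cookie */

    static void example_setup(struct dev_pagemap *pgmap)
    {
        pgmap->type  = MEMORY_DEVICE_PRIVATE;
        pgmap->ops   = &example_pgmap_ops;
        pgmap->owner = &example_owner; /* what callers later pass as
                                          range->dev_private_owner */
    }
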
@@ -300,7 +297,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
         * Since each architecture defines a struct page for the zero page, just
         * fall through and treat it like a normal page.
         */
-       if (pte_special(pte) && !pte_devmap(pte) &&
+       if (!vm_normal_page(walk->vma, addr, pte) &&
+           !pte_devmap(pte) &&
            !is_zero_pfn(pte_pfn(pte))) {
                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                        pte_unmap(ptep);
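
The vm_normal_page() test above is broader than the pte_special() test
it replaces: pte_special() is only meaningful on
CONFIG_ARCH_HAS_PTE_SPECIAL architectures (elsewhere it is always
false), while vm_normal_page() answers the underlying question on every
architecture, namely whether a usable struct page sits behind the pte.
That also makes the decision correct per pte inside VM_MIXEDMAP vmas,
which mix both kinds of mapping. The predicate, restated as an
illustration rather than code from the patch:

    static bool example_pte_unusable(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t pte)
    {
        /*
         * No struct page (raw PFN or MMIO mapping), not device-DAX,
         * and not the shared zero page: hmm_range_fault() cannot
         * report such a pte and must fault or return an error.
         */
        return !vm_normal_page(vma, addr, pte) &&
               !pte_devmap(pte) &&
               !is_zero_pfn(pte_pfn(pte));
    }
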
@@ -518,7 +516,7 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end,
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
 
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
            vma->vm_flags & VM_READ)
                return 0;
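
With the vma-level VM_MIXEDMAP rejection dropped here, mixed mappings
are instead handled pte by pte through the vm_normal_page() test above.
For reference, the shape of the hmm_range_fault() caller that all of
this serves, following the retry protocol described in
Documentation/mm/hmm.rst (the driver-side names are illustrative; the
hmm_range fields and mmu_interval_notifier calls are the real API):

    static int example_fault_range(struct mmu_interval_notifier *mni,
                                   unsigned long start, unsigned long end,
                                   unsigned long *pfns, void *owner)
    {
        struct hmm_range range = {
            .notifier          = mni,
            .start             = start,
            .end               = end,
            .hmm_pfns          = pfns,
            .default_flags     = HMM_PFN_REQ_FAULT,
            .dev_private_owner = owner,
        };
        int ret;

    again:
        range.notifier_seq = mmu_interval_read_begin(mni);
        mmap_read_lock(mni->mm);
        ret = hmm_range_fault(&range);
        mmap_read_unlock(mni->mm);
        if (ret) {
            if (ret == -EBUSY)
                goto again;   /* raced with an invalidation */
            return ret;
        }
        /*
         * Program the device page tables from pfns[] here, under the
         * same driver lock the invalidate callback takes, and restart
         * if an invalidation raced with the fault.
         */
        if (mmu_interval_read_retry(mni, range.notifier_seq))
            goto again;
        return 0;
    }
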