mm: reintroduce and deprecate rlimit based access for SHM_HUGETLB
[platform/kernel/linux-starfive.git] / mm/memory.c
index d7df5ba..5b4ad5e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,6 +1151,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
+               /*
+                * pte_mkyoung() would be more correct here, but atomic care
+                * is needed to avoid losing the dirty bit: it is easier to use
+                * mark_page_accessed().
+                */
                mark_page_accessed(page);
        }
 unlock:
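
A note on the comment added above: the worry is that making the PTE young with a plain read-modify-write can race with the CPU setting the hardware dirty bit through the TLB, so the store would put back a stale, clean entry. The sketch below is only an illustration of that hazard, not code from the patch; naive_mkyoung() is a made-up helper, and mm/address/ptep stand for the values already in scope in follow_page():

/*
 * Hypothetical helper, NOT part of this patch: the obvious way to set
 * the accessed bit, shown only to illustrate the race the new comment
 * is talking about.
 */
static void naive_mkyoung(struct mm_struct *mm, unsigned long address,
			  pte_t *ptep)
{
	pte_t entry = *ptep;	/* snapshot taken while dirty is still clear */

	/*
	 * Window: the CPU can write to the page here and set the hardware
	 * dirty bit in the live PTE, but not in our 'entry' snapshot.
	 */
	entry = pte_mkyoung(entry);		/* young set on the stale copy  */
	set_pte_at(mm, address, ptep, entry);	/* store wipes that dirty bit   */
}

mark_page_accessed() sidesteps the problem because it only touches the struct page / LRU state and never rewrites the PTE.
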
@@ -1667,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         */
        if (addr == vma->vm_start && end == vma->vm_end) {
                vma->vm_pgoff = pfn;
-               vma->vm_flags |= VM_PFNMAP_AT_MMAP;
+               vma->vm_flags |= VM_PFN_AT_MMAP;
        } else if (is_cow_mapping(vma->vm_flags))
                return -EINVAL;
 
@@ -1680,7 +1685,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                 * needed from higher level routine calling unmap_vmas
                 */
                vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-               vma->vm_flags &= ~VM_PFNMAP_AT_MMAP;
+               vma->vm_flags &= ~VM_PFN_AT_MMAP;
                return -EINVAL;
        }
 
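For context on the two remap_pfn_range() hunks above: the flag being renamed (VM_PFNMAP_AT_MMAP becoming VM_PFN_AT_MMAP) marks the case where the whole VMA is remapped in a single call at mmap time, so vm_pgoff can be reused to remember the base pfn; the second hunk clears it again on the error path along with the other PFN-mapping flags. A minimal, hypothetical driver ->mmap that hits that full-VMA case might look like the sketch below; mydev_mmap() and MYDEV_BASE_PFN are made up for illustration:

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * Remap the entire [vm_start, vm_end) range in one call; this is the
	 * addr == vma->vm_start && end == vma->vm_end case above, so
	 * remap_pfn_range() may stash the pfn in vm_pgoff and set
	 * VM_PFN_AT_MMAP.  MYDEV_BASE_PFN is a made-up device frame number.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_BASE_PFN + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
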
@@ -2435,8 +2440,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
-       mark_page_accessed(page);
-
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);