rmap: convert page_move_anon_rmap() to use a folio
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 2 Sep 2022 19:46:45 +0000 (20:46 +0100)
Committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:02:53 +0000 (14:02 -0700)
Removes one call to compound_head() and a reference to page->mapping.

Link: https://lkml.kernel.org/r/20220902194653.1739778-50-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index 2ff17b9aabd9bf0b5d062503b60d923bda495a51..d44ff516a208932caba289484e02504ac57d61ab 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1099,22 +1099,20 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
  */
 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
-       struct anon_vma *anon_vma = vma->anon_vma;
-       struct page *subpage = page;
-
-       page = compound_head(page);
+       void *anon_vma = vma->anon_vma;
+       struct folio *folio = page_folio(page);
 
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_VMA(!anon_vma, vma);
 
-       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       anon_vma += PAGE_MAPPING_ANON;
        /*
         * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
         * simultaneously, so a concurrent reader (eg folio_referenced()'s
         * folio_test_anon()) will not see one without the other.
         */
-       WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
-       SetPageAnonExclusive(subpage);
+       WRITE_ONCE(folio->mapping, anon_vma);
+       SetPageAnonExclusive(page);
 }
 
 /**