/**
* __page_set_anon_rmap - set up new anonymous rmap
- * @page: Page or Hugepage to add to rmap
+ * @folio: Folio which contains page.
+ * @page: Page to add to rmap.
* @vma: VM area to add page to.
* @address: User virtual address of the mapping
* @exclusive: the page is exclusively owned by the current process
*/
-static void __page_set_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct folio *folio, struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma);
- if (PageAnon(page))
+ if (folio_test_anon(folio))
goto out;
/*
 * If the page isn't exclusively mapped into this vma,
 * we must use the _oldest_ possible anon_vma for the
 * page mapping!
 */
if (!exclusive)
anon_vma = anon_vma->root;
/*
- * page_idle does a lockless/optimistic rmap scan on page->mapping.
+ * page_idle does a lockless/optimistic rmap scan on folio->mapping.
* Make sure the compiler doesn't split the stores of anon_vma and
* the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
* could mistake the mapping for a struct address_space and crash.
*/
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
- page->index = linear_page_index(vma, address);
+ WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
+ folio->index = linear_page_index(vma, address);
out:
if (exclusive)
SetPageAnonExclusive(page);
if (likely(!folio_test_ksm(folio))) {
/* address might be in next vma when migration races vma_adjust */
if (first)
- __page_set_anon_rmap(page, vma, address,
+ __page_set_anon_rmap(folio, page, vma, address,
!!(flags & RMAP_EXCLUSIVE));
else
__page_check_anon_rmap(page, vma, address);
}
__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
- __page_set_anon_rmap(&folio->page, vma, address, 1);
+ __page_set_anon_rmap(folio, &folio->page, vma, address, 1);
}
VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
if (first)
- __page_set_anon_rmap(page, vma, address,
+ __page_set_anon_rmap(folio, page, vma, address,
!!(flags & RMAP_EXCLUSIVE));
}
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
folio_clear_hugetlb_restore_reserve(folio);
- __page_set_anon_rmap(page, vma, address, 1);
+ __page_set_anon_rmap(folio, page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
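
The two stores the patch rewrites (folio->mapping and folio->index) follow a scheme the in-code comment only hints at: ->mapping holds an anon_vma pointer tagged with PAGE_MAPPING_ANON in its low bit (which is what folio_test_anon() tests), and ->index is the page's linear offset within the VMA. The sketch below is a standalone, userspace illustration of that encoding, not kernel code: it uses simplified stand-in types, assumes PAGE_MAPPING_ANON is bit 0 as in include/linux/page-flags.h, and reduces linear_page_index() to ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff, which is its non-hugetlb form in include/linux/pagemap.h.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_MAPPING_ANON 0x1UL  /* low bit tags an anon_vma pointer */

/* Simplified stand-ins for the kernel structures; illustration only. */
struct anon_vma { int dummy; };
struct folio_sketch { void *mapping; unsigned long index; };
struct vma_sketch { unsigned long vm_start; unsigned long vm_pgoff; };

/* What __page_set_anon_rmap() stores: a tagged anon_vma and a linear index. */
static void set_anon_rmap(struct folio_sketch *folio, struct anon_vma *anon_vma,
			  struct vma_sketch *vma, unsigned long address)
{
	folio->mapping = (void *)((uintptr_t)anon_vma + PAGE_MAPPING_ANON);
	folio->index = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

/* What folio_test_anon() checks: is the tag bit set in ->mapping? */
static int test_anon(const struct folio_sketch *folio)
{
	return ((uintptr_t)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

/* Recover the anon_vma by stripping the tag again. */
static struct anon_vma *anon_vma_of(const struct folio_sketch *folio)
{
	return (struct anon_vma *)((uintptr_t)folio->mapping - PAGE_MAPPING_ANON);
}

int main(void)
{
	static struct anon_vma av;  /* aligned allocation leaves the low bits free */
	struct vma_sketch vma = { .vm_start = 0x7f0000000000UL, .vm_pgoff = 0 };
	struct folio_sketch folio = { 0 };
	unsigned long addr = vma.vm_start + 5 * (1UL << PAGE_SHIFT);

	set_anon_rmap(&folio, &av, &vma, addr);
	printf("anon=%d index=%lu anon_vma ok=%d\n",
	       test_anon(&folio), folio.index, anon_vma_of(&folio) == &av);
	return 0;
}

In the kernel the tagged value is written with WRITE_ONCE(), as the patched comment explains, so page_idle's lockless reader never observes a half-written ->mapping that could be mistaken for a struct address_space.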