From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Thu, 6 Jul 2023 19:52:51 +0000 (+0100)
Subject: rmap: pass the folio to __page_check_anon_rmap()
X-Git-Tag: v6.6.7~1970^2~443
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=dba438bd7663fefab870a6dd4b01ed0923c32d79;p=platform%2Fkernel%2Flinux-starfive.git

rmap: pass the folio to __page_check_anon_rmap()

The lone caller already has the folio, so pass it in instead of deriving
it from the page again.

Link: https://lkml.kernel.org/r/20230706195251.2707542-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/mm/rmap.c b/mm/rmap.c
index 0c0d885..2668f5e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1175,14 +1175,14 @@ out:
 
 /**
  * __page_check_anon_rmap - sanity check anonymous rmap addition
- * @page:	the page to add the mapping to
+ * @folio:	The folio containing @page.
+ * @page:	the page to check the mapping of
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct page *page,
+static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	struct folio *folio = page_folio(page);
 	/*
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
 	 * be set up correctly at this point.
@@ -1262,7 +1262,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 			__page_set_anon_rmap(folio, page, vma, address,
 					     !!(flags & RMAP_EXCLUSIVE));
 		else
-			__page_check_anon_rmap(page, vma, address);
+			__page_check_anon_rmap(folio, page, vma, address);
 	}
 
 	mlock_vma_folio(folio, vma, compound);
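
For readers outside the kernel tree, the sketch below is a minimal, self-contained
illustration of the refactoring pattern this patch applies: instead of the callee
re-deriving the containing folio from the page (as the removed page_folio() call did),
the lone caller passes the folio it already holds. The struct layouts, the "owner"
field, and the check_anon_rmap_old()/check_anon_rmap_new() names are simplified
stand-ins invented for this example, not the kernel's definitions.

/*
 * Illustrative sketch only -- simplified stand-ins for struct folio and
 * struct page; builds as a standalone user-space C program.
 */
#include <stdio.h>
#include <assert.h>

struct folio { int id; };
struct page  { struct folio *owner; int index; };

/* Before: the callee re-derives the folio from the page each time. */
static void check_anon_rmap_old(struct page *page)
{
	struct folio *folio = page->owner;	/* stands in for page_folio(page) */

	assert(folio != NULL);
	printf("old: folio %d, page index %d\n", folio->id, page->index);
}

/* After: the lone caller already holds the folio, so it is passed in. */
static void check_anon_rmap_new(struct folio *folio, struct page *page)
{
	assert(folio == page->owner);		/* @page must belong to @folio */
	printf("new: folio %d, page index %d\n", folio->id, page->index);
}

int main(void)
{
	struct folio f = { .id = 1 };
	struct page  p = { .owner = &f, .index = 0 };

	check_anon_rmap_old(&p);
	check_anon_rmap_new(&f, &p);	/* no repeated derivation in the callee */
	return 0;
}

In the kernel change itself, the benefit is simply what the commit message states:
the caller already has the folio, so passing it avoids deriving it from the page a
second time inside __page_check_anon_rmap().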