mm/hugetlb: Use try_grab_folio() instead of try_grab_compound_head()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 8 Jan 2022 05:15:04 +0000 (00:15 -0500)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 21 Mar 2022 16:56:35 +0000 (12:56 -0400)
follow_hugetlb_page() only cares about success or failure, so it doesn't
need to know the type of the returned pointer, only whether it's NULL
or not.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
include/linux/mm.h
mm/gup.c
mm/hugetlb.c

index b764057..dca5c99 100644 (file)
@@ -1124,9 +1124,6 @@ static inline void get_page(struct page *page)
 }
 
 bool __must_check try_grab_page(struct page *page, unsigned int flags);
-struct page *try_grab_compound_head(struct page *page, int refs,
-                                   unsigned int flags);
-
 
 static inline __must_check bool try_get_page(struct page *page)
 {
index cbbddcf..0140041 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -133,7 +133,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
        return NULL;
 }
 
-struct page *try_grab_compound_head(struct page *page,
+static inline struct page *try_grab_compound_head(struct page *page,
                int refs, unsigned int flags)
 {
        return &try_grab_folio(page, refs, flags)->page;
index 785d6e3..10203f3 100644 (file)
@@ -6076,7 +6076,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                if (pages) {
                        /*
-                        * try_grab_compound_head() should always succeed here,
+                        * try_grab_folio() should always succeed here,
                         * because: a) we hold the ptl lock, and b) we've just
                         * checked that the huge page is present in the page
                         * tables. If the huge page is present, then the tail
@@ -6085,9 +6085,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         * any way. So this page must be available at this
                         * point, unless the page refcount overflowed:
                         */
-                       if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
-                                                                refs,
-                                                                flags))) {
+                       if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
+                                                        flags))) {
                                spin_unlock(ptl);
                                remainder = 0;
                                err = -ENOMEM;