mm/hugetlb: convert dissolve_free_huge_page() to folios
author		Sidhartha Kumar <sidhartha.kumar@oracle.com>
		Tue, 29 Nov 2022 22:50:32 +0000 (14:50 -0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Mon, 12 Dec 2022 02:12:14 +0000 (18:12 -0800)
Removes compound_head() call by using a folio rather than a head page.
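
For context, a minimal sketch (not part of the patch) of the conversion pattern this commit applies: resolve the folio once with page_folio(), then use folio_* helpers so each subsequent test no longer has to look up the head page via compound_head(). The helper name is_free_hugetlb_page() below is hypothetical and purely illustrative.

	/*
	 * Illustrative only: the old code called PageHuge()/page_count() on a
	 * possibly-tail page, and each of those resolves the head page via
	 * compound_head().  Taking the folio once up front gives the head
	 * directly, so the folio_* helpers avoid the repeated lookup.
	 */
	#include <linux/mm.h>
	#include <linux/page-flags.h>

	/* Hypothetical helper, not in the kernel tree. */
	static bool is_free_hugetlb_page(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* single head-page lookup */

		return folio_test_hugetlb(folio) &&	/* was PageHuge(page) */
		       !folio_ref_count(folio);		/* was !page_count(page) */
	}

The hunks below follow the same pattern for the remaining checks, e.g. folio_test_hugetlb_freed() and folio_hstate().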

Link: https://lkml.kernel.org/r/20221129225039.82257-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 5960a05..d02293f 100644
@@ -2128,21 +2128,21 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 int dissolve_free_huge_page(struct page *page)
 {
        int rc = -EBUSY;
+       struct folio *folio = page_folio(page);
 
 retry:
        /* Not to disrupt normal path by vainly holding hugetlb_lock */
-       if (!PageHuge(page))
+       if (!folio_test_hugetlb(folio))
                return 0;
 
        spin_lock_irq(&hugetlb_lock);
-       if (!PageHuge(page)) {
+       if (!folio_test_hugetlb(folio)) {
                rc = 0;
                goto out;
        }
 
-       if (!page_count(page)) {
-               struct page *head = compound_head(page);
-               struct hstate *h = page_hstate(head);
+       if (!folio_ref_count(folio)) {
+               struct hstate *h = folio_hstate(folio);
                if (!available_huge_pages(h))
                        goto out;
 
@@ -2150,7 +2150,7 @@ retry:
                 * We should make sure that the page is already on the free list
                 * when it is dissolved.
                 */
-               if (unlikely(!HPageFreed(head))) {
+               if (unlikely(!folio_test_hugetlb_freed(folio))) {
                        spin_unlock_irq(&hugetlb_lock);
                        cond_resched();
 
@@ -2165,7 +2165,7 @@ retry:
                        goto retry;
                }
 
-               remove_hugetlb_page(h, head, false);
+               remove_hugetlb_page(h, &folio->page, false);
                h->max_huge_pages--;
                spin_unlock_irq(&hugetlb_lock);
 
@@ -2177,12 +2177,12 @@ retry:
                 * Attempt to allocate vmemmmap here so that we can take
                 * appropriate action on failure.
                 */
-               rc = hugetlb_vmemmap_restore(h, head);
+               rc = hugetlb_vmemmap_restore(h, &folio->page);
                if (!rc) {
-                       update_and_free_page(h, head, false);
+                       update_and_free_page(h, &folio->page, false);
                } else {
                        spin_lock_irq(&hugetlb_lock);
-                       add_hugetlb_page(h, head, false);
+                       add_hugetlb_page(h, &folio->page, false);
                        h->max_huge_pages++;
                        spin_unlock_irq(&hugetlb_lock);
                }
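
Note that remove_hugetlb_page(), hugetlb_vmemmap_restore(), update_and_free_page() and add_hugetlb_page() still take a struct page at this point in the series, so the folio's embedded head page is passed as &folio->page; later patches in the series presumably convert those helpers to operate on folios directly.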