From 6c037149014027d50175da5be4ae4531374dcbe0 Mon Sep 17 00:00:00 2001
From: Mike Kravetz
Date: Wed, 24 Feb 2021 12:09:04 -0800
Subject: [PATCH] hugetlb: convert PageHugeFreed to HPageFreed flag

Use new hugetlb specific HPageFreed flag to replace the PageHugeFreed
interfaces.

Link: https://lkml.kernel.org/r/20210122195231.324857-6-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz
Reviewed-by: Oscar Salvador
Reviewed-by: Muchun Song
Acked-by: Michal Hocko
Cc: David Hildenbrand
Cc: Matthew Wilcox
Cc: Miaohe Lin
Cc: Michal Hocko
Cc: Naoya Horiguchi
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/hugetlb.h |  3 +++
 mm/hugetlb.c            | 23 ++++-------------------
 2 files changed, 7 insertions(+), 19 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e97498a..eb2682f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -487,11 +487,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  *	allocator.  Typically used for migration target pages when no pages
  *	are available in the pool.  The hugetlb free page path will
  *	immediately free pages with this flag set to the buddy allocator.
+ * HPG_freed - Set when page is on the free lists.
  */
 enum hugetlb_page_flags {
 	HPG_restore_reserve = 0,
 	HPG_migratable,
 	HPG_temporary,
+	HPG_freed,
 	__NR_HPAGEFLAGS,
 };
 
@@ -536,6 +538,7 @@ static inline void ClearHPage##uname(struct page *page)	\
 HPAGEFLAG(RestoreReserve, restore_reserve)
 HPAGEFLAG(Migratable, migratable)
 HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
 
 #ifdef CONFIG_HUGETLB_PAGE
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4d7d453..5377e1d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,21 +79,6 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
-static inline bool PageHugeFreed(struct page *head)
-{
-	return page_private(head + 4) == -1UL;
-}
-
-static inline void SetPageHugeFreed(struct page *head)
-{
-	set_page_private(head + 4, -1UL);
-}
-
-static inline void ClearPageHugeFreed(struct page *head)
-{
-	set_page_private(head + 4, 0);
-}
-
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
@@ -1053,7 +1038,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
 	list_move(&page->lru, &h->hugepage_freelists[nid]);
 	h->free_huge_pages++;
 	h->free_huge_pages_node[nid]++;
-	SetPageHugeFreed(page);
+	SetHPageFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1070,7 +1055,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 	list_move(&page->lru, &h->hugepage_activelist);
 	set_page_refcounted(page);
-	ClearPageHugeFreed(page);
+	ClearHPageFreed(page);
 	h->free_huge_pages--;
 	h->free_huge_pages_node[nid]--;
 	return page;
 }
@@ -1485,7 +1470,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	spin_lock(&hugetlb_lock);
 	h->nr_huge_pages++;
 	h->nr_huge_pages_node[nid]++;
-	ClearPageHugeFreed(page);
+	ClearHPageFreed(page);
 	spin_unlock(&hugetlb_lock);
 }
 
@@ -1756,7 +1741,7 @@ retry:
 	 * We should make sure that the page is already on the free list
 	 * when it is dissolved.
 	 */
-	if (unlikely(!PageHugeFreed(head))) {
+	if (unlikely(!HPageFreed(head))) {
 		spin_unlock(&hugetlb_lock);
 		cond_resched();
 
-- 
2.7.4
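
For readers unfamiliar with the HPAGEFLAG() machinery this patch builds on: the
sketch below is a minimal, self-contained userspace illustration of the pattern,
not the kernel code. All names prefixed with "fake_"/"FAKE_" and the plain
bitwise operations are assumptions made for the example; the real accessors are
generated in include/linux/hugetlb.h and operate on the head page's
page->private. The point is that one HPAGEFLAG(Freed, freed) invocation yields
HPageFreed()/SetHPageFreed()/ClearHPageFreed(), replacing the three open-coded
helpers removed from mm/hugetlb.c that encoded the state in
page_private(head + 4).

/*
 * Userspace sketch of the flag-accessor pattern (illustrative only).
 * Build with: cc -Wall -o hpageflag_demo hpageflag_demo.c
 */
#include <stdio.h>

struct fake_page {
	unsigned long private;		/* stands in for page->private */
};

enum fake_hugetlb_page_flags {
	FAKE_HPG_restore_reserve = 0,
	FAKE_HPG_migratable,
	FAKE_HPG_temporary,
	FAKE_HPG_freed,
	__NR_FAKE_HPAGEFLAGS,
};

/* One macro invocation per flag generates test/set/clear helpers. */
#define FAKE_HPAGEFLAG(uname, flname)					\
static inline int HPage##uname(struct fake_page *page)			\
	{ return !!(page->private & (1UL << FAKE_HPG_##flname)); }	\
static inline void SetHPage##uname(struct fake_page *page)		\
	{ page->private |= 1UL << FAKE_HPG_##flname; }			\
static inline void ClearHPage##uname(struct fake_page *page)		\
	{ page->private &= ~(1UL << FAKE_HPG_##flname); }

FAKE_HPAGEFLAG(Freed, freed)

int main(void)
{
	struct fake_page page = { .private = 0 };

	SetHPageFreed(&page);			/* enqueue_huge_page() side */
	printf("freed? %d\n", HPageFreed(&page));	/* prints 1 */
	ClearHPageFreed(&page);			/* dequeue/prep side */
	printf("freed? %d\n", HPageFreed(&page));	/* prints 0 */
	return 0;
}

With this pattern, adding the "freed" state costs one enum entry (HPG_freed)
and one HPAGEFLAG() line, and the flag lives alongside the other hugetlb page
flags in the head page's private word rather than overloading page_private()
of a particular tail page as the removed helpers did.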