mm: hugetlb_vmemmap: cleanup hugetlb_free_vmemmap_enabled*
author Muchun Song <songmuchun@bytedance.com>
Fri, 29 Apr 2022 06:16:15 +0000 (23:16 -0700)
committer akpm <akpm@linux-foundation.org>
Fri, 29 Apr 2022 06:16:15 +0000 (23:16 -0700)
The word "free" is not expressive enough to describe the feature of
optimizing the vmemmap pages associated with each HugeTLB page, so
rename this keyword to "optimize".  This patch cleans up the static key
and hugetlb_free_vmemmap_enabled() to make the code more expressive.
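
For context, the renamed key is consumed through static_branch_maybe(),
which selects the likely or unlikely static-branch flavour depending on
whether the Kconfig option defaults the key to on.  A minimal sketch of
that helper, approximated from <linux/jump_label.h>:

	/*
	 * Sketch of the <linux/jump_label.h> helper behind
	 * hugetlb_optimize_vmemmap_enabled().  With
	 * CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON=y the enabled case
	 * is the straight-line (likely) path; otherwise it is out of line.
	 */
	#define static_branch_maybe(config, x)				\
		(IS_ENABLED(config) ? static_branch_likely(x)		\
				    : static_branch_unlikely(x))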

Link: https://lkml.kernel.org/r/20220404074652.68024-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/mm/flush.c
include/linux/page-flags.h
mm/hugetlb_vmemmap.c
mm/memory_hotplug.c

arch/arm64/mm/flush.c
index 840d998..bbb2640 100644 (file)
@@ -86,7 +86,7 @@ void flush_dcache_page(struct page *page)
         * is reused (more details can refer to the comments above
         * page_fixed_fake_head()).
         */
-       if (hugetlb_free_vmemmap_enabled() && PageHuge(page))
+       if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
                page = compound_head(page);
 
        if (test_bit(PG_dcache_clean, &page->flags))
include/linux/page-flags.h
index 9d8eeaa..573f499 100644 (file)
@@ -192,16 +192,16 @@ enum pageflags {
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-                        hugetlb_free_vmemmap_enabled_key);
+                        hugetlb_optimize_vmemmap_key);
 
-static __always_inline bool hugetlb_free_vmemmap_enabled(void)
+static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
 {
        return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-                                  &hugetlb_free_vmemmap_enabled_key);
+                                  &hugetlb_optimize_vmemmap_key);
 }
 
 /*
- * If the feature of freeing some vmemmap pages associated with each HugeTLB
+ * If the feature of optimizing vmemmap pages associated with each HugeTLB
  * page is enabled, the head vmemmap page frame is reused and all of the tail
  * vmemmap addresses map to the head vmemmap page frame (further details can
  * refer to the figure at the head of the mm/hugetlb_vmemmap.c).  In other
@@ -218,7 +218,7 @@ static __always_inline bool hugetlb_free_vmemmap_enabled(void)
  */
 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
 {
-       if (!hugetlb_free_vmemmap_enabled())
+       if (!hugetlb_optimize_vmemmap_enabled())
                return page;
 
        /*
@@ -247,7 +247,7 @@ static inline const struct page *page_fixed_fake_head(const struct page *page)
        return page;
 }
 
-static inline bool hugetlb_free_vmemmap_enabled(void)
+static inline bool hugetlb_optimize_vmemmap_enabled(void)
 {
        return false;
 }
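
For readers without the tree at hand: the body elided between the two
page-flags.h hunks above (unchanged by this patch) is what actually
detects a fake head.  A from-memory sketch, so treat the details as
approximate:

	/* Sketched for context only; not part of this patch. */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * Bit 0 of page[1].compound_head is set when it points
		 * at a (possibly fake) head struct page.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;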
mm/hugetlb_vmemmap.c
index 91b79b9..f252949 100644 (file)
 #define RESERVE_VMEMMAP_SIZE           (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
 DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-                       hugetlb_free_vmemmap_enabled_key);
-EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
+                       hugetlb_optimize_vmemmap_key);
+EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
 static int __init hugetlb_vmemmap_early_param(char *buf)
 {
@@ -204,9 +204,9 @@ static int __init hugetlb_vmemmap_early_param(char *buf)
                return -EINVAL;
 
        if (!strcmp(buf, "on"))
-               static_branch_enable(&hugetlb_free_vmemmap_enabled_key);
+               static_branch_enable(&hugetlb_optimize_vmemmap_key);
        else if (!strcmp(buf, "off"))
-               static_branch_disable(&hugetlb_free_vmemmap_enabled_key);
+               static_branch_disable(&hugetlb_optimize_vmemmap_key);
        else
                return -EINVAL;
 
@@ -282,7 +282,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
        BUILD_BUG_ON(__NR_USED_SUBPAGE >=
                     RESERVE_VMEMMAP_SIZE / sizeof(struct page));
 
-       if (!hugetlb_free_vmemmap_enabled())
+       if (!hugetlb_optimize_vmemmap_enabled())
                return;
 
        vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
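
Usage note: the parser above is registered as an early_param() elsewhere
in mm/hugetlb_vmemmap.c (outside these hunks), and the user-visible boot
parameter keeps its pre-rename name.  Approximately:

	/*
	 * Not part of this patch: the registration that wires the kernel
	 * command line to hugetlb_vmemmap_early_param().  Booting with
	 * hugetlb_free_vmemmap=on enables the static key, =off disables
	 * it, and anything else fails with -EINVAL per the parser above.
	 */
	early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);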
mm/memory_hotplug.c
index 416b38c..678a449 100644 (file)
@@ -1289,7 +1289,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
         *       populate a single PMD.
         */
        return memmap_on_memory &&
-              !hugetlb_free_vmemmap_enabled() &&
+              !hugetlb_optimize_vmemmap_enabled() &&
               IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
               size == memory_block_size_bytes() &&
               IS_ALIGNED(vmemmap_size, PMD_SIZE) &&