mm: hugetlb_vmemmap: cleanup CONFIG_HUGETLB_PAGE_FREE_VMEMMAP*
authorMuchun Song <songmuchun@bytedance.com>
Fri, 29 Apr 2022 06:16:15 +0000 (23:16 -0700)
committerakpm <akpm@linux-foundation.org>
Fri, 29 Apr 2022 06:16:15 +0000 (23:16 -0700)
The word "free" is not expressive enough to describe the feature of
optimizing vmemmap pages associated with each HugeTLB page, so rename this
keyword to "optimize".  This patch cleans up the configs to make the code
more expressive.

Link: https://lkml.kernel.org/r/20220404074652.68024-4-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
14 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/mm/hugetlbpage.rst
arch/arm64/Kconfig
arch/arm64/mm/flush.c
arch/x86/Kconfig
arch/x86/mm/init_64.c
fs/Kconfig
include/linux/hugetlb.h
include/linux/mm.h
include/linux/page-flags.h
mm/Makefile
mm/hugetlb_vmemmap.c
mm/hugetlb_vmemmap.h
mm/sparse-vmemmap.c

index 3f1cc5e..d9b577d 100644 (file)
                        Format: size[KMG]
 
        hugetlb_free_vmemmap=
-                       [KNL] Reguires CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+                       [KNL] Reguires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
                        enabled.
                        Allows heavy hugetlb users to free up some more
                        memory (7 * PAGE_SIZE for each 2MB hugetlb page).
                        on:  enable the feature
                        off: disable the feature
 
-                       Built with CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON=y,
+                       Built with CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y,
                        the default is on.
 
                        This is not compatible with memory_hotplug.memmap_on_memory.
index 0166f9d..a90330d 100644 (file)
@@ -164,7 +164,7 @@ default_hugepagesz
        will all result in 256 2M huge pages being allocated.  Valid default
        huge page size is architecture dependent.
 hugetlb_free_vmemmap
-       When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this enables freeing
+       When CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is set, this enables optimizing
        unused vmemmap pages associated with each HugeTLB page.
 
 When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
index 8da1f65..cc04986 100644 (file)
@@ -97,7 +97,7 @@ config ARM64
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
-       select ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP
+       select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_NO_INSTR
        select ARCH_HAS_UBSAN_SANITIZE_ALL
index bbb2640..fc4f710 100644 (file)
@@ -78,7 +78,7 @@ void flush_dcache_page(struct page *page)
        /*
         * Only the head page's flags of HugeTLB can be cleared since the tail
         * vmemmap pages associated with each HugeTLB page are mapped with
-        * read-only when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is enabled (more
+        * read-only when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is enabled (more
         * details can refer to vmemmap_remap_pte()).  Although
         * __sync_icache_dcache() only set PG_dcache_clean flag on the head
         * page struct, there is more than one page struct with PG_dcache_clean
index 5b77b43..da07c80 100644 (file)
@@ -121,7 +121,7 @@ config X86
        select ARCH_WANTS_NO_INSTR
        select ARCH_WANT_GENERAL_HUGETLB
        select ARCH_WANT_HUGE_PMD_SHARE
-       select ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP      if X86_64
+       select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP  if X86_64
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_THP_SWAP              if X86_64
        select ARCH_HAS_PARANOID_L1D_FLUSH
index 96d34eb..af34dd1 100644 (file)
@@ -1269,7 +1269,7 @@ static struct kcore_list kcore_vsyscall;
 
 static void __init register_page_bootmem_info(void)
 {
-#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
+#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
        int i;
 
        for_each_online_node(i)
index 8f6ab1c..5976eb3 100644 (file)
@@ -250,22 +250,22 @@ config HUGETLB_PAGE
 # to enable the feature of minimizing overhead of struct page associated with
 # each HugeTLB page.
 #
-config ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP
+config ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        bool
 
-config HUGETLB_PAGE_FREE_VMEMMAP
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        def_bool HUGETLB_PAGE
-       depends on ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP
+       depends on ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        depends on SPARSEMEM_VMEMMAP
 
-config HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON
-       bool "Default freeing vmemmap pages of HugeTLB to on"
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
+       bool "Default optimizing vmemmap pages of HugeTLB to on"
        default n
-       depends on HUGETLB_PAGE_FREE_VMEMMAP
+       depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        help
-         When using HUGETLB_PAGE_FREE_VMEMMAP, the freeing unused vmemmap
+         When using HUGETLB_PAGE_OPTIMIZE_VMEMMAP, the optimizing unused vmemmap
          pages associated with each HugeTLB page is default off. Say Y here
-         to enable freeing vmemmap pages of HugeTLB by default. It can then
+         to enable optimizing vmemmap pages of HugeTLB by default. It can then
          be disabled on the command line via hugetlb_free_vmemmap=off.
 
 config MEMFD_CREATE
index 189bddb..ac2ece9 100644 (file)
@@ -623,7 +623,7 @@ struct hstate {
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        unsigned int optimize_vmemmap_pages;
 #endif
 #ifdef CONFIG_CGROUP_HUGETLB
index 16d6b46..a8f4c7e 100644 (file)
@@ -3145,7 +3145,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
 }
 #endif
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int vmemmap_remap_free(unsigned long start, unsigned long end,
                       unsigned long reuse);
 int vmemmap_remap_alloc(unsigned long start, unsigned long end,
index 573f499..1ea8968 100644 (file)
@@ -190,13 +190,13 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
                         hugetlb_optimize_vmemmap_key);
 
 static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
 {
-       return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
+       return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
                                   &hugetlb_optimize_vmemmap_key);
 }
 
index 4cc13f3..6f9ffa9 100644 (file)
@@ -77,7 +77,7 @@ obj-$(CONFIG_FRONTSWAP)       += frontswap.o
 obj-$(CONFIG_ZSWAP)    += zswap.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
 obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
-obj-$(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)        += hugetlb_vmemmap.o
+obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)    += hugetlb_vmemmap.o
 obj-$(CONFIG_NUMA)     += mempolicy.o
 obj-$(CONFIG_SPARSEMEM)        += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
index f252949..2655434 100644 (file)
 #define RESERVE_VMEMMAP_NR             1U
 #define RESERVE_VMEMMAP_SIZE           (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
-DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
+DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
                        hugetlb_optimize_vmemmap_key);
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
@@ -276,7 +276,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 
        /*
         * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
-        * page structs that can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP,
+        * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP,
         * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page.
         */
        BUILD_BUG_ON(__NR_USED_SUBPAGE >=
index 9760537..109b0a5 100644 (file)
@@ -10,7 +10,7 @@
 #define _LINUX_HUGETLB_VMEMMAP_H
 #include <linux/hugetlb.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_free(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_init(struct hstate *h);
@@ -41,5 +41,5 @@ static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
 {
        return 0;
 }
-#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
index 8aecd6b..52f3652 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 /**
  * struct vmemmap_remap_walk - walk vmemmap page table
  *
@@ -420,7 +420,7 @@ int vmemmap_remap_alloc(unsigned long start, unsigned long end,
 
        return 0;
 }
-#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
 
 /*
  * Allocate a block of memory to be used to back the virtual memory map