// SPDX-License-Identifier: GPL-2.0
/*
 * Optimize vmemmap pages associated with HugeTLB
 *
 * Copyright (c) 2020, Bytedance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/vm/vmemmap_dedup.rst
 */
11 #define pr_fmt(fmt) "HugeTLB: " fmt
13 #include <linux/memory_hotplug.h>
14 #include "hugetlb_vmemmap.h"
/*
 * There are a lot of struct page structures associated with each HugeTLB page.
 * For tail pages, the value of compound_head is the same. So we can reuse the
 * first page of head page structures. We map the virtual addresses of all the
 * pages of tail page structures to the head page struct, and then free these
 * page frames. Therefore, we need to reserve one page as vmemmap area.
 */
23 #define RESERVE_VMEMMAP_NR 1U
24 #define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
26 enum vmemmap_optimize_mode {
31 DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
32 hugetlb_optimize_vmemmap_key);
33 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
35 static enum vmemmap_optimize_mode vmemmap_optimize_mode =
36 IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
38 static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
40 if (vmemmap_optimize_mode == to)
43 if (to == VMEMMAP_OPTIMIZE_OFF)
44 static_branch_dec(&hugetlb_optimize_vmemmap_key);
46 static_branch_inc(&hugetlb_optimize_vmemmap_key);
47 WRITE_ONCE(vmemmap_optimize_mode, to);
50 static int __init hugetlb_vmemmap_early_param(char *buf)
53 enum vmemmap_optimize_mode mode;
55 if (kstrtobool(buf, &enable))
58 mode = enable ? VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF;
59 vmemmap_optimize_mode_switch(mode);
63 early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
66 * Previously discarded vmemmap pages will be allocated and remapping
67 * after this function returns zero.
69 int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
72 unsigned long vmemmap_addr = (unsigned long)head;
73 unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
75 if (!HPageVmemmapOptimized(head))
78 vmemmap_addr += RESERVE_VMEMMAP_SIZE;
79 vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
80 vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
81 vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
84 * The pages which the vmemmap virtual address range [@vmemmap_addr,
85 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
86 * the range is mapped to the page which @vmemmap_reuse is mapped to.
87 * When a HugeTLB page is freed to the buddy allocator, previously
88 * discarded vmemmap pages must be allocated and remapping.
90 ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
91 GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
93 ClearHPageVmemmapOptimized(head);
94 static_branch_dec(&hugetlb_optimize_vmemmap_key);
100 void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
102 unsigned long vmemmap_addr = (unsigned long)head;
103 unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
105 vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
109 if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF)
112 static_branch_inc(&hugetlb_optimize_vmemmap_key);
114 vmemmap_addr += RESERVE_VMEMMAP_SIZE;
115 vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
116 vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
119 * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
120 * to the page which @vmemmap_reuse is mapped to, then free the pages
121 * which the range [@vmemmap_addr, @vmemmap_end] is mapped to.
123 if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
124 static_branch_dec(&hugetlb_optimize_vmemmap_key);
126 SetHPageVmemmapOptimized(head);
129 void __init hugetlb_vmemmap_init(struct hstate *h)
131 unsigned int nr_pages = pages_per_huge_page(h);
132 unsigned int vmemmap_pages;
135 * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
136 * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP,
137 * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page.
139 BUILD_BUG_ON(__NR_USED_SUBPAGE >=
140 RESERVE_VMEMMAP_SIZE / sizeof(struct page));
142 if (!is_power_of_2(sizeof(struct page))) {
143 pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
144 static_branch_disable(&hugetlb_optimize_vmemmap_key);
148 vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
150 * The head page is not to be freed to buddy allocator, the other tail
151 * pages will map to the head page, so they can be freed.
153 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
154 * on some architectures (e.g. aarch64). See Documentation/arm64/
155 * hugetlbpage.rst for more details.
157 if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
158 h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
160 pr_info("can optimize %d vmemmap pages for %s\n",
161 h->optimize_vmemmap_pages, h->name);
164 #ifdef CONFIG_PROC_SYSCTL
165 static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write,
166 void *buffer, size_t *length,
170 enum vmemmap_optimize_mode mode;
171 static DEFINE_MUTEX(sysctl_mutex);
173 if (write && !capable(CAP_SYS_ADMIN))
176 mutex_lock(&sysctl_mutex);
177 mode = vmemmap_optimize_mode;
179 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
181 vmemmap_optimize_mode_switch(mode);
182 mutex_unlock(&sysctl_mutex);
187 static struct ctl_table hugetlb_vmemmap_sysctls[] = {
189 .procname = "hugetlb_optimize_vmemmap",
190 .maxlen = sizeof(enum vmemmap_optimize_mode),
192 .proc_handler = hugetlb_optimize_vmemmap_handler,
193 .extra1 = SYSCTL_ZERO,
194 .extra2 = SYSCTL_ONE,
199 static __init int hugetlb_vmemmap_sysctls_init(void)
202 * If "memory_hotplug.memmap_on_memory" is enabled or "struct page"
203 * crosses page boundaries, the vmemmap pages cannot be optimized.
205 if (!mhp_memmap_on_memory() && is_power_of_2(sizeof(struct page)))
206 register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
210 late_initcall(hugetlb_vmemmap_sysctls_init);
211 #endif /* CONFIG_PROC_SYSCTL */