// SPDX-License-Identifier: GPL-2.0
/*
 * Free some vmemmap pages of HugeTLB
 *
 * Copyright (c) 2020, Bytedance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * The struct page structures (page structs) are used to describe a physical
 * page frame. By default, there is a one-to-one mapping from a page frame to
 * its corresponding page struct.
 *
 * HugeTLB pages consist of multiple base page size pages and are supported by
 * many architectures. See hugetlbpage.rst in the Documentation directory for
 * more details. On the x86-64 architecture, HugeTLB pages of size 2MB and 1GB
 * are currently supported. Since the base page size on x86 is 4KB, a 2MB
 * HugeTLB page consists of 512 base pages and a 1GB HugeTLB page consists of
 * 262144 base pages. For each base page, there is a corresponding page struct.
 *
 * Within the HugeTLB subsystem, only the first 4 page structs are used to
 * contain unique information about a HugeTLB page. __NR_USED_SUBPAGE provides
 * this upper limit. The only 'useful' information in the remaining page
 * structs is the compound_head field, and this field is the same for all tail
 * pages.
 *
 * By removing redundant page structs for HugeTLB pages, memory can be
 * returned to the buddy allocator for other uses.
 *
 * Different architectures support different HugeTLB pages. For example, the
 * following table shows the HugeTLB page sizes supported by the x86 and arm64
 * architectures. Because arm64 supports 4k, 16k, and 64k base pages and also
 * supports contiguous entries, it supports many sizes of HugeTLB page.
 *
 * +--------------+-----------+-----------------------------------------------+
 * | Architecture | Page Size |               HugeTLB Page Size               |
 * +--------------+-----------+-----------+-----------+-----------+-----------+
 * |    x86-64    |    4KB    |    2MB    |    1GB    |           |           |
 * +--------------+-----------+-----------+-----------+-----------+-----------+
 * |              |    4KB    |   64KB    |    2MB    |   32MB    |    1GB    |
 * |              +-----------+-----------+-----------+-----------+-----------+
 * |    arm64     |   16KB    |    2MB    |   32MB    |    1GB    |           |
 * |              +-----------+-----------+-----------+-----------+-----------+
 * |              |   64KB    |    2MB    |  512MB    |   16GB    |           |
 * +--------------+-----------+-----------+-----------+-----------+-----------+
 *
 * When the system boots up, every HugeTLB page has more than one page of
 * struct page structs, whose size is (unit: pages):
 *
 *    struct_size = HugeTLB_Size / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
 *
 * Where HugeTLB_Size is the size of the HugeTLB page. We know that the size
 * of the HugeTLB page is always n times PAGE_SIZE. So we can get the
 * following relationship:
 *
 *    HugeTLB_Size = n * PAGE_SIZE
 *
 * Then,
 *
 *    struct_size = n * PAGE_SIZE / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
 *                = n * sizeof(struct page) / PAGE_SIZE
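 *
 * For example (a quick sanity check, assuming 4KB base pages and a 64-byte
 * struct page, as on x86-64): a 2MB HugeTLB page gives n = 2MB / 4KB = 512,
 * so struct_size = 512 * 64 / 4096 = 8 pages.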
 *
 * We can use huge mappings at the pud/pmd level for the HugeTLB page.
 *
 * For a HugeTLB page mapped at the pmd level:
 *
 *    struct_size = n * sizeof(struct page) / PAGE_SIZE
 *                = PAGE_SIZE / sizeof(pte_t) * sizeof(struct page) / PAGE_SIZE
 *                = sizeof(struct page) / sizeof(pte_t)
 *
 * Where n is the number of pte entries that one page can contain. So the
 * value of n is (PAGE_SIZE / sizeof(pte_t)).
 *
 * This optimization only supports 64-bit systems, so the value of
 * sizeof(pte_t) is 8. And this optimization is applicable only when the size
 * of struct page is a power of two. In most cases, the size of struct page
 * is 64 bytes (e.g. x86-64 and arm64). So if we use pmd level mapping for a
 * HugeTLB page, its struct page structs occupy 8 page frames, whose size
 * depends on the size of the base page.
 *
 * For a HugeTLB page mapped at the pud level:
 *
 *    struct_size = PAGE_SIZE / sizeof(pmd_t) * struct_size(pmd)
 *                = PAGE_SIZE / 8 * 8 (pages)
 *                = PAGE_SIZE (pages)
 *
 * Where struct_size(pmd) is the size of the struct page structs of a HugeTLB
 * page mapped at the pmd level.
 *
 * E.g.: A 2MB HugeTLB page on x86_64 consists of 8 page frames of struct
 * page structs while a 1GB HugeTLB page consists of 4096.
 *
 * Next, we take the pmd level mapping of the HugeTLB page as an example to
 * show the internal implementation of this optimization. There are 8 pages
 * of struct page structs associated with a HugeTLB page which is pmd mapped.
 *
 * Here is how things look before optimization.
 *
 *    HugeTLB                  struct pages(8 pages)         page frame(8 pages)
 * +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+
 * |           |                     |     0     | -------------> |     0     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     1     | -------------> |     1     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     2     | -------------> |     2     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     3     | -------------> |     3     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     4     | -------------> |     4     |
 * |    PMD    |                     +-----------+                +-----------+
 * |   level   |                     |     5     | -------------> |     5     |
 * |  mapping  |                     +-----------+                +-----------+
 * |           |                     |     6     | -------------> |     6     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     7     | -------------> |     7     |
 * |           |                     +-----------+                +-----------+
 * |           |
 * |           |
 * |           |
 * +-----------+
 *
 * The value of page->compound_head is the same for all tail pages. The first
 * page of page structs (page 0) associated with the HugeTLB page contains the
 * 4 page structs necessary to describe the HugeTLB. The only use of the
 * remaining pages of page structs (page 1 to page 7) is to point to
 * page->compound_head. Therefore, we can remap pages 1 to 7 to page 0. Only 1
 * page of page structs will be used for each HugeTLB page. This will allow us
 * to free the remaining 7 pages to the buddy allocator.
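 *
 * In other words (assuming 4KB base pages), each 2MB HugeTLB page returns
 * 7 * 4KB = 28KB of vmemmap to the buddy allocator, roughly 1.4% of the huge
 * page's own size.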
 *
 * Here is how things look after remapping.
 *
 *    HugeTLB                  struct pages(8 pages)         page frame(8 pages)
 * +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+
 * |           |                     |     0     | -------------> |     0     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     1     | ---------------^ ^ ^ ^ ^ ^ ^
 * |           |                     +-----------+                  | | | | | |
 * |           |                     |     2     | -----------------+ | | | | |
 * |           |                     +-----------+                    | | | | |
 * |           |                     |     3     | -------------------+ | | | |
 * |           |                     +-----------+                      | | | |
 * |           |                     |     4     | ---------------------+ | | |
 * |    PMD    |                     +-----------+                        | | |
 * |   level   |                     |     5     | -----------------------+ | |
 * |  mapping  |                     +-----------+                          | |
 * |           |                     |     6     | -------------------------+ |
 * |           |                     +-----------+                            |
 * |           |                     |     7     | ---------------------------+
 * |           |                     +-----------+
 * |           |
 * |           |
 * |           |
 * +-----------+
 *
 * When a HugeTLB page is freed to the buddy system, we should allocate 7
 * vmemmap pages and restore the previous mapping relationship.
 *
 * For a HugeTLB page mapped at the pud level, it is similar to the above. We
 * can also use this approach to free (PAGE_SIZE - 1) vmemmap pages.
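 *
 * For instance (again assuming 4KB base pages and a 64-byte struct page), a
 * 1GB HugeTLB page has struct_size = PAGE_SIZE = 4096 vmemmap pages, of which
 * 4095 can be freed, i.e. about 16MB per 1GB huge page.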
 *
 * Apart from HugeTLB pages of the pmd/pud level mapping, some architectures
 * (e.g. aarch64) provide a contiguous bit in the translation table entries
 * that hints to the MMU that the entry is one of a contiguous set of entries
 * that can be cached in a single TLB entry.
 *
 * The contiguous bit is used to increase the mapping size at the pmd and pte
 * (last) level. So this type of HugeTLB page can be optimized only when the
 * size of its struct page structs is greater than 1 page.
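 *
 * For example (assuming a 64-byte struct page): on arm64 with a 64KB base
 * page, a 2MB contiguous-pte HugeTLB page has only 32 struct pages (2KB of
 * vmemmap), which is less than one 64KB page, so it cannot be optimized.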
 *
 * Notice: The head vmemmap page is not freed to the buddy allocator and all
 * tail vmemmap pages are mapped to the head vmemmap page frame. So we can
 * see more than one struct page with PG_head (e.g. 8 per 2 MB HugeTLB page)
 * associated with each HugeTLB page. compound_head() can handle this
 * correctly (for more details, refer to the comment above compound_head()).
 */
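
/*
 * For illustration, a simplified sketch of compound_head() (see
 * include/linux/page-flags.h): tail pages store the head page pointer with
 * bit 0 set in page->compound_head, so any of the aliased tail struct pages
 * resolves back to the same head page:
 *
 *	static inline struct page *compound_head(struct page *page)
 *	{
 *		unsigned long head = READ_ONCE(page->compound_head);
 *
 *		if (unlikely(head & 1))
 *			return (struct page *)(head - 1);
 *		return page;
 *	}
 */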

#define pr_fmt(fmt)	"HugeTLB: " fmt

#include "hugetlb_vmemmap.h"

/*
 * There are a lot of struct page structs associated with each HugeTLB page.
 * For tail pages, the value of compound_head is the same. So we can reuse the
 * first page of head page structs. We map the virtual addresses of all the
 * pages of tail page structs to the head page struct, and then free these
 * page frames. Therefore, we need to reserve one page as the vmemmap area.
 */
#define RESERVE_VMEMMAP_NR		1U
#define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)

DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
			hugetlb_free_vmemmap_enabled_key);
EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);

static int __init early_hugetlb_free_vmemmap_param(char *buf)
{
	/* We cannot optimize if a "struct page" crosses page boundaries. */
	if (!is_power_of_2(sizeof(struct page))) {
		pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
		return 0;
	}

	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		static_branch_enable(&hugetlb_free_vmemmap_enabled_key);
	else if (!strcmp(buf, "off"))
		static_branch_disable(&hugetlb_free_vmemmap_enabled_key);
	else
		return -EINVAL;

	return 0;
}
early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
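
/*
 * Example usage: the feature is toggled on the kernel command line, e.g.
 *
 *	hugetlb_free_vmemmap=on
 *
 * When the parameter is absent, the default comes from
 * CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON via DEFINE_STATIC_KEY_MAYBE()
 * above.
 */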

static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
{
	return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
}
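
/*
 * E.g. on x86-64 with 2MB HugeTLB pages, free_vmemmap_pages_per_hpage() is 7,
 * so the helper above returns 7 << PAGE_SHIFT = 28KB of vmemmap per huge page.
 */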

/*
 * Previously discarded vmemmap pages will be allocated and remapped after
 * this function returns zero.
 */
int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_addr = (unsigned long)head;
	unsigned long vmemmap_end, vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
	vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_addr,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
	if (!ret)
		ClearHPageVmemmapOptimized(head);

	return ret;
}
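
/*
 * A note on the gfp mask above: __GFP_NORETRY avoids aggressive reclaim,
 * presumably because the caller can tolerate and handle failure here, and
 * __GFP_THISNODE keeps the re-allocated vmemmap pages on the target node
 * rather than falling back to remote nodes.
 */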
void free_huge_page_vmemmap(struct hstate *h, struct page *head)
{
	unsigned long vmemmap_addr = (unsigned long)head;
	unsigned long vmemmap_end, vmemmap_reuse;

	if (!free_vmemmap_pages_per_hpage(h))
		return;

	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
	vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_addr, @vmemmap_end) is mapped to.
	 */
	if (!vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
		SetHPageVmemmapOptimized(head);
}
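
/*
 * Note: despite the names, free_huge_page_vmemmap() runs when a HugeTLB page
 * is created (its excess vmemmap is freed), while alloc_huge_page_vmemmap()
 * runs when the page is dissolved back to the buddy allocator (the vmemmap
 * is re-allocated and the original mapping restored).
 */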
void __init hugetlb_vmemmap_init(struct hstate *h)
{
	unsigned int nr_pages = pages_per_huge_page(h);
	unsigned int vmemmap_pages;

	/*
	 * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
	 * page structs that can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP,
	 * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page.
	 */
	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
		     RESERVE_VMEMMAP_SIZE / sizeof(struct page));

	if (!hugetlb_free_vmemmap_enabled())
		return;

	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
	/*
	 * The head page is not to be freed to the buddy allocator, the other
	 * tail pages will map to the head page, so they can be freed.
	 *
	 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
	 * on some architectures (e.g. aarch64). See Documentation/arm64/
	 * hugetlbpage.rst for more details.
	 */
	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

	pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
		h->name);
}
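
/*
 * Worked example (assuming x86-64, 4KB base pages, 64-byte struct page): for
 * the 2MB hstate, nr_pages = 512, so vmemmap_pages = (512 * 64) >> 12 = 8 and
 * nr_free_vmemmap_pages = 8 - RESERVE_VMEMMAP_NR = 7.
 */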