// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of walked pte.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void			(*remap_pte)(pte_t *pte, unsigned long addr,
					     struct vmemmap_remap_walk *walk);
	unsigned long		nr_walked;
	struct page		*reuse_page;
	unsigned long		reuse_addr;
	struct list_head	*vmemmap_pages;
};
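/*
 * A worked example of the savings (a sketch assuming x86-64 with 4 KiB base
 * pages and a 64-byte struct page, as in Documentation/mm/vmemmap_dedup.rst):
 * a 2 MiB HugeTLB page is described by 512 struct pages, i.e. 32 KiB or 8
 * vmemmap pages. HVO keeps the first vmemmap page, remaps the remaining 7
 * pages to it read-only and frees them, saving 7 pages (~28 KiB) per 2 MiB
 * HugeTLB page. A 1 GiB HugeTLB page needs 4096 vmemmap pages, of which 4095
 * can be freed (~16 MiB saved per 1 GiB page).
 */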
static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int i;
	pmd_t __pmd;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from buddy allocator must be able to
		 * be treated as independent small pages (as they can be freed
		 * individually).
		 */
		if (!PageReserved(page))
			split_page(page, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
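/*
 * Note on the pair of split helpers: split_vmemmap_huge_pmd() below samples
 * pmd_leaf() under init_mm.page_table_lock and only calls
 * __split_vmemmap_huge_pmd() above when a PMD-mapped vmemmap area actually
 * needs splitting, so the page table allocation is skipped in the common
 * case. __split_vmemmap_huge_pmd() then rechecks pmd_leaf() under the lock
 * and simply frees its new page table if another walker already split the
 * PMD concurrently.
 */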
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}
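/*
 * Illustration of the walk (hypothetical 2 MiB HugeTLB page with an 8-page
 * vmemmap): the caller passes [reuse, end), where @reuse backs vmemmap page 0.
 * The first PTE visited is recorded as @reuse_page and skipped; @remap_pte is
 * then invoked for vmemmap pages 1..7 only, which is exactly the set of pages
 * that can be remapped to page 0 and released.
 */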
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}
static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}
static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}
/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or buddy allocator. If the PG_reserved flag is set, it means
 * that it is allocated from the memblock allocator, so just free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}
/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}
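/*
 * Because the tail vmemmap pages end up mapped read-only, any write to a
 * tail struct page while the HugeTLB page is optimized would fault. This is
 * why hugetlb_vmemmap_restore() must run (reallocating writable vmemmap
 * pages) before the HugeTLB page is dissolved or returned to the buddy
 * allocator.
 */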
/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid value will
 * be checked in free_tail_pages_check(). In order to avoid the message of
 * "corrupted mapping in tail page", we need to reset at least 3 struct page
 * structs (one head struct page and two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE	3
static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}
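/*
 * The BUILD_BUG_ON above guarantees that both the destination
 * [start, start + 3) and the source [start + 3, start + 6) lie inside a
 * single vmemmap page, so the freshly copied page always contains enough
 * clean tail struct pages to overwrite the stale head/tail metadata with.
 */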
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}
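/*
 * vmemmap_restore_pte() consumes exactly one page from @walk->vmemmap_pages
 * per PTE it rewrites, so callers must provide one page per address in
 * [start, end) (see alloc_vmemmap_page_list()). Pages are taken from the
 * list head in the same order vmemmap_remap_pte() queued them at the tail.
 */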
/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range is mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for the huge
	 * pages, the routine of vmemmap page table walking has the following
	 * rules (see more details from the vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be continuous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed. These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}
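/*
 * Layout assumed by vmemmap_remap_free() and vmemmap_remap_alloc() (a sketch,
 * sizes not to scale):
 *
 *	reuse          start                                      end
 *	  |              |                                          |
 *	  v              v                                          v
 *	+--------------+--------------+--------------+--------------+
 *	| vmemmap pg 0 | vmemmap pg 1 |     ...      | vmemmap pg N |
 *	+--------------+--------------+--------------+--------------+
 *
 * Page 0 backs @reuse and stays mapped; the pages backing [@start, @end) are
 * remapped to page 0 and freed (or reallocated and remapped back on restore),
 * which is why both functions assert start - reuse == PAGE_SIZE.
 */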
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}
/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to the pages which are from the @vmemmap_pages
 *			 respectively.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in the vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
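/*
 * Run-time control: the "hugetlb_free_vmemmap" boot parameter (via
 * core_param() above) and the "vm.hugetlb_optimize_vmemmap" sysctl (see
 * hugetlb_vmemmap_sysctls below) both toggle vmemmap_optimize_enabled, so
 * the feature can be enabled or disabled without rebuilding the kernel.
 */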
/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}
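/*
 * A note on the GFP flags used above: __GFP_THISNODE restricts the
 * reallocation to the node that alloc_vmemmap_page_list() derives from
 * @head, keeping the struct pages local to the HugeTLB page, and
 * __GFP_NORETRY bounds the reclaim effort so that freeing a HugeTLB page
 * does not stall for long under memory pressure. On failure the page simply
 * remains marked HPageVmemmapOptimized and the caller handles the error.
 */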
/* Return true if a HugeTLB page's vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only the vmemmap page's vmemmap page can be self-hosted.
		 * Walk the page tables to find the backing page of the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements and the vmemmap pages
		 * being at the start of the hotplugged memory region in the
		 * memory_hotplug.memmap_on_memory case, checking whether any
		 * vmemmap page's vmemmap page is marked VmemmapSelfHosted is
		 * sufficient.
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 *   ^   |     |                                        |
		 *   +---+     |                                        |
		 *     ^       |                                        |
		 *     +-------+                                        |
		 *          ^                                           |
		 *          +-------------------------------------------+
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}
/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}
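/*
 * Expected pairing (following the comments above): the HugeTLB core is
 * expected to call hugetlb_vmemmap_optimize() when a huge page is prepared
 * for use and hugetlb_vmemmap_restore() before the page is dissolved or
 * released back to the buddy allocator, so the static key's enable count
 * tracks how many pages are currently optimized.
 */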
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};
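/*
 * Usage sketch (assuming CONFIG_PROC_SYSCTL and at least one optimizable
 * hstate, so the table above gets registered):
 *
 *	# echo 0 > /proc/sys/vm/hugetlb_optimize_vmemmap	(disable HVO)
 *	# echo 1 > /proc/sys/vm/hugetlb_optimize_vmemmap	(re-enable HVO)
 *
 * The setting only affects HugeTLB pages optimized or restored after the
 * write; already-optimized pages keep their remapped vmemmap until restored.
 */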
static int __init hugetlb_vmemmap_init(void)
{
	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);

	if (IS_ENABLED(CONFIG_PROC_SYSCTL)) {
		const struct hstate *h;

		for_each_hstate(h) {
			if (hugetlb_vmemmap_optimizable(h)) {
				register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
				break;
			}
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);