// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
126 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
127 phys_addr_t phys_addr, pgprot_t prot,
128 unsigned int max_page_shift)
130 if (max_page_shift < PMD_SHIFT)
133 if (!arch_vmap_pmd_supported(prot))
136 if ((end - addr) != PMD_SIZE)
139 if (!IS_ALIGNED(addr, PMD_SIZE))
142 if (!IS_ALIGNED(phys_addr, PMD_SIZE))
145 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
148 return pmd_set_huge(pmd, phys_addr, prot);
151 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
152 phys_addr_t phys_addr, pgprot_t prot,
153 unsigned int max_page_shift, pgtbl_mod_mask *mask)
158 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
162 next = pmd_addr_end(addr, end);
164 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
166 *mask |= PGTBL_PMD_MODIFIED;
170 if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
172 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
176 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
177 phys_addr_t phys_addr, pgprot_t prot,
178 unsigned int max_page_shift)
180 if (max_page_shift < PUD_SHIFT)
183 if (!arch_vmap_pud_supported(prot))
186 if ((end - addr) != PUD_SIZE)
189 if (!IS_ALIGNED(addr, PUD_SIZE))
192 if (!IS_ALIGNED(phys_addr, PUD_SIZE))
195 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
198 return pud_set_huge(pud, phys_addr, prot);
201 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
202 phys_addr_t phys_addr, pgprot_t prot,
203 unsigned int max_page_shift, pgtbl_mod_mask *mask)
208 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
212 next = pud_addr_end(addr, end);
214 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
216 *mask |= PGTBL_PUD_MODIFIED;
220 if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
221 max_page_shift, mask))
223 } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
227 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
228 phys_addr_t phys_addr, pgprot_t prot,
229 unsigned int max_page_shift)
231 if (max_page_shift < P4D_SHIFT)
234 if (!arch_vmap_p4d_supported(prot))
237 if ((end - addr) != P4D_SIZE)
240 if (!IS_ALIGNED(addr, P4D_SIZE))
243 if (!IS_ALIGNED(phys_addr, P4D_SIZE))
246 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
249 return p4d_set_huge(p4d, phys_addr, prot);
252 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
253 phys_addr_t phys_addr, pgprot_t prot,
254 unsigned int max_page_shift, pgtbl_mod_mask *mask)
259 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
263 next = p4d_addr_end(addr, end);
265 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
267 *mask |= PGTBL_P4D_MODIFIED;
271 if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
272 max_page_shift, mask))
274 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
278 static int vmap_range_noflush(unsigned long addr, unsigned long end,
279 phys_addr_t phys_addr, pgprot_t prot,
280 unsigned int max_page_shift)
286 pgtbl_mod_mask mask = 0;
292 pgd = pgd_offset_k(addr);
294 next = pgd_addr_end(addr, end);
295 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
296 max_page_shift, &mask);
299 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
301 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
302 arch_sync_kernel_mappings(start, end);
int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}
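/*
 * Illustrative sketch only (not part of this file): roughly how an ioremap()
 * implementation can drive ioremap_page_range() after reserving a chunk of
 * vmalloc space. The function name and locals are made up for the example;
 * real callers live in mm/ioremap.c and arch code.
 */
#if 0
static void __iomem *example_ioremap_prot(phys_addr_t phys_addr, size_t size,
					  pgprot_t prot)
{
	unsigned long offset = offset_in_page(phys_addr);
	struct vm_struct *area;
	unsigned long vaddr;

	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	/* Reserve a VM_IOREMAP area in vmalloc space. */
	area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* Establish the kernel mapping of the physical range. */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
#endif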
321 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
322 pgtbl_mod_mask *mask)
326 pte = pte_offset_kernel(pmd, addr);
328 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
329 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
330 } while (pte++, addr += PAGE_SIZE, addr != end);
331 *mask |= PGTBL_PTE_MODIFIED;
334 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
335 pgtbl_mod_mask *mask)
341 pmd = pmd_offset(pud, addr);
343 next = pmd_addr_end(addr, end);
345 cleared = pmd_clear_huge(pmd);
346 if (cleared || pmd_bad(*pmd))
347 *mask |= PGTBL_PMD_MODIFIED;
351 if (pmd_none_or_clear_bad(pmd))
353 vunmap_pte_range(pmd, addr, next, mask);
356 } while (pmd++, addr = next, addr != end);
359 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
360 pgtbl_mod_mask *mask)
366 pud = pud_offset(p4d, addr);
368 next = pud_addr_end(addr, end);
370 cleared = pud_clear_huge(pud);
371 if (cleared || pud_bad(*pud))
372 *mask |= PGTBL_PUD_MODIFIED;
376 if (pud_none_or_clear_bad(pud))
378 vunmap_pmd_range(pud, addr, next, mask);
379 } while (pud++, addr = next, addr != end);
382 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
383 pgtbl_mod_mask *mask)
388 p4d = p4d_offset(pgd, addr);
390 next = p4d_addr_end(addr, end);
394 *mask |= PGTBL_P4D_MODIFIED;
396 if (p4d_none_or_clear_bad(p4d))
398 vunmap_pud_range(p4d, addr, next, mask);
399 } while (p4d++, addr = next, addr != end);
/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
414 void __vunmap_range_noflush(unsigned long start, unsigned long end)
418 unsigned long addr = start;
419 pgtbl_mod_mask mask = 0;
422 pgd = pgd_offset_k(addr);
424 next = pgd_addr_end(addr, end);
426 mask |= PGTBL_PGD_MODIFIED;
427 if (pgd_none_or_clear_bad(pgd))
429 vunmap_p4d_range(pgd, addr, next, &mask);
430 } while (pgd++, addr = next, addr != end);
432 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
433 arch_sync_kernel_mappings(start, end);
436 void vunmap_range_noflush(unsigned long start, unsigned long end)
438 kmsan_vunmap_range_noflush(start, end);
439 __vunmap_range_noflush(start, end);
/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
458 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
459 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
460 pgtbl_mod_mask *mask)
465 * nr is a running index into the array which helps higher level
466 * callers keep track of where we're up to.
469 pte = pte_alloc_kernel_track(pmd, addr, mask);
473 struct page *page = pages[*nr];
475 if (WARN_ON(!pte_none(ptep_get(pte))))
479 if (WARN_ON(!pfn_valid(page_to_pfn(page))))
482 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
484 } while (pte++, addr += PAGE_SIZE, addr != end);
485 *mask |= PGTBL_PTE_MODIFIED;
489 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
490 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
491 pgtbl_mod_mask *mask)
496 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
500 next = pmd_addr_end(addr, end);
501 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
503 } while (pmd++, addr = next, addr != end);
507 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
508 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
509 pgtbl_mod_mask *mask)
514 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
518 next = pud_addr_end(addr, end);
519 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
521 } while (pud++, addr = next, addr != end);
525 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
526 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
527 pgtbl_mod_mask *mask)
532 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
536 next = p4d_addr_end(addr, end);
537 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
539 } while (p4d++, addr = next, addr != end);
543 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
544 pgprot_t prot, struct page **pages)
546 unsigned long start = addr;
551 pgtbl_mod_mask mask = 0;
554 pgd = pgd_offset_k(addr);
556 next = pgd_addr_end(addr, end);
558 mask |= PGTBL_PGD_MODIFIED;
559 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
562 } while (pgd++, addr = next, addr != end);
564 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
565 arch_sync_kernel_mappings(start, end);
/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
579 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
580 pgprot_t prot, struct page **pages, unsigned int page_shift)
582 unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
584 WARN_ON(page_shift < PAGE_SHIFT);
586 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
587 page_shift == PAGE_SHIFT)
588 return vmap_small_pages_range_noflush(addr, end, prot, pages);
590 for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
593 err = vmap_range_noflush(addr, addr + (1UL << page_shift),
594 page_to_phys(pages[i]), prot,
599 addr += 1UL << page_shift;
605 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
606 pgprot_t prot, struct page **pages, unsigned int page_shift)
608 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
613 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
655 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
656 * return the tail page that corresponds to the base page address, which
657 * matches small vmap mappings.
659 struct page *vmalloc_to_page(const void *vmalloc_addr)
661 unsigned long addr = (unsigned long) vmalloc_addr;
662 struct page *page = NULL;
663 pgd_t *pgd = pgd_offset_k(addr);
670 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
671 * architectures that do not vmalloc module space
673 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
677 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
678 return NULL; /* XXX: no allowance for huge pgd */
679 if (WARN_ON_ONCE(pgd_bad(*pgd)))
682 p4d = p4d_offset(pgd, addr);
686 return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
687 if (WARN_ON_ONCE(p4d_bad(*p4d)))
690 pud = pud_offset(p4d, addr);
694 return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
695 if (WARN_ON_ONCE(pud_bad(*pud)))
698 pmd = pmd_offset(pud, addr);
702 return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
703 if (WARN_ON_ONCE(pmd_bad(*pmd)))
706 ptep = pte_offset_kernel(pmd, addr);
707 pte = ptep_get(ptep);
708 if (pte_present(pte))
709 page = pte_page(pte);
713 EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
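/*
 * Illustrative sketch only (not part of this file): how a caller might walk
 * a vmalloc'ed buffer and inspect its backing pages. The function name and
 * the pr_info() format are made up for the example.
 */
#if 0
static void example_dump_vmalloc_backing(const void *buf, unsigned long size)
{
	unsigned long offset;

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		/* Virtually contiguous, but the PFNs are typically scattered. */
		pr_info("va %px -> pfn 0x%lx\n", buf + offset,
			vmalloc_to_pfn(buf + offset));
	}
}
#endif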
/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}
805 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
806 static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
808 struct vmap_area *va = NULL;
809 struct rb_node *n = vmap_area_root.rb_node;
811 addr = (unsigned long)kasan_reset_tag((void *)addr);
814 struct vmap_area *tmp;
816 tmp = rb_entry(n, struct vmap_area, rb_node);
817 if (tmp->va_end > addr) {
819 if (tmp->va_start <= addr)
830 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
832 struct rb_node *n = root->rb_node;
834 addr = (unsigned long)kasan_reset_tag((void *)addr);
837 struct vmap_area *va;
839 va = rb_entry(n, struct vmap_area, rb_node);
840 if (addr < va->va_start)
842 else if (addr >= va->va_end)
/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
859 static __always_inline struct rb_node **
860 find_va_links(struct vmap_area *va,
861 struct rb_root *root, struct rb_node *from,
862 struct rb_node **parent)
864 struct vmap_area *tmp_va;
865 struct rb_node **link;
868 link = &root->rb_node;
869 if (unlikely(!*link)) {
878 * Go to the bottom of the tree. When we hit the last point
879 * we end up with parent rb_node and correct direction, i name
880 * it link, where the new va->rb_node will be attached to.
883 tmp_va = rb_entry(*link, struct vmap_area, rb_node);
886 * During the traversal we also do some sanity check.
887 * Trigger the BUG() if there are sides(left/right)
890 if (va->va_end <= tmp_va->va_start)
891 link = &(*link)->rb_left;
892 else if (va->va_start >= tmp_va->va_end)
893 link = &(*link)->rb_right;
895 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
896 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
902 *parent = &tmp_va->rb_node;
906 static __always_inline struct list_head *
907 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
909 struct list_head *list;
911 if (unlikely(!parent))
913 * The red-black tree where we try to find VA neighbors
914 * before merging or inserting is empty, i.e. it means
915 * there is no free vmap space. Normally it does not
916 * happen but we handle this case anyway.
920 list = &rb_entry(parent, struct vmap_area, rb_node)->list;
921 return (&parent->rb_right == link ? list->next : list);
924 static __always_inline void
925 __link_va(struct vmap_area *va, struct rb_root *root,
926 struct rb_node *parent, struct rb_node **link,
927 struct list_head *head, bool augment)
930 * VA is still not in the list, but we can
931 * identify its future previous list_head node.
933 if (likely(parent)) {
934 head = &rb_entry(parent, struct vmap_area, rb_node)->list;
935 if (&parent->rb_right != link)
939 /* Insert to the rb-tree */
940 rb_link_node(&va->rb_node, parent, link);
943 * Some explanation here. Just perform simple insertion
944 * to the tree. We do not set va->subtree_max_size to
945 * its current size before calling rb_insert_augmented().
946 * It is because we populate the tree from the bottom
947 * to parent levels when the node _is_ in the tree.
949 * Therefore we set subtree_max_size to zero after insertion,
950 * to let __augment_tree_propagate_from() puts everything to
951 * the correct order later on.
953 rb_insert_augmented(&va->rb_node,
954 root, &free_vmap_area_rb_augment_cb);
955 va->subtree_max_size = 0;
957 rb_insert_color(&va->rb_node, root);
960 /* Address-sort this list */
961 list_add(&va->list, head);
964 static __always_inline void
965 link_va(struct vmap_area *va, struct rb_root *root,
966 struct rb_node *parent, struct rb_node **link,
967 struct list_head *head)
969 __link_va(va, root, parent, link, head, false);
972 static __always_inline void
973 link_va_augment(struct vmap_area *va, struct rb_root *root,
974 struct rb_node *parent, struct rb_node **link,
975 struct list_head *head)
977 __link_va(va, root, parent, link, head, true);
980 static __always_inline void
981 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
983 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
987 rb_erase_augmented(&va->rb_node,
988 root, &free_vmap_area_rb_augment_cb);
990 rb_erase(&va->rb_node, root);
992 list_del_init(&va->list);
993 RB_CLEAR_NODE(&va->rb_node);
996 static __always_inline void
997 unlink_va(struct vmap_area *va, struct rb_root *root)
999 __unlink_va(va, root, false);
1002 static __always_inline void
1003 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1005 __unlink_va(va, root, true);
1008 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1010 * Gets called when remove the node and rotate.
1012 static __always_inline unsigned long
1013 compute_subtree_max_size(struct vmap_area *va)
1015 return max3(va_size(va),
1016 get_subtree_max_size(va->rb_node.rb_left),
1017 get_subtree_max_size(va->rb_node.rb_right));
1021 augment_tree_propagate_check(void)
1023 struct vmap_area *va;
1024 unsigned long computed_size;
1026 list_for_each_entry(va, &free_vmap_area_list, list) {
1027 computed_size = compute_subtree_max_size(va);
1028 if (computed_size != va->subtree_max_size)
1029 pr_emerg("tree is corrupted: %lu, %lu\n",
1030 va_size(va), va->subtree_max_size);
1036 * This function populates subtree_max_size from bottom to upper
1037 * levels starting from VA point. The propagation must be done
1038 * when VA size is modified by changing its va_start/va_end. Or
1039 * in case of newly inserting of VA to the tree.
1041 * It means that __augment_tree_propagate_from() must be called:
1042 * - After VA has been inserted to the tree(free path);
1043 * - After VA has been shrunk(allocation path);
1044 * - After VA has been increased(merging path).
1046 * Please note that, it does not mean that upper parent nodes
1047 * and their subtree_max_size are recalculated all the time up
1056 * For example if we modify the node 4, shrinking it to 2, then
1057 * no any modification is required. If we shrink the node 2 to 1
1058 * its subtree_max_size is updated only, and set to 1. If we shrink
1059 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
1060 * node becomes 4--6.
1062 static __always_inline void
1063 augment_tree_propagate_from(struct vmap_area *va)
1066 * Populate the tree from bottom towards the root until
1067 * the calculated maximum available size of checked node
1068 * is equal to its current one.
1070 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1072 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1073 augment_tree_propagate_check();
1078 insert_vmap_area(struct vmap_area *va,
1079 struct rb_root *root, struct list_head *head)
1081 struct rb_node **link;
1082 struct rb_node *parent;
1084 link = find_va_links(va, root, NULL, &parent);
1086 link_va(va, root, parent, link, head);
1090 insert_vmap_area_augment(struct vmap_area *va,
1091 struct rb_node *from, struct rb_root *root,
1092 struct list_head *head)
1094 struct rb_node **link;
1095 struct rb_node *parent;
1098 link = find_va_links(va, NULL, from, &parent);
1100 link = find_va_links(va, root, NULL, &parent);
1103 link_va_augment(va, root, parent, link, head);
1104 augment_tree_propagate_from(va);
/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlap
 * ranges, followed by WARN() report. Despite it is a
 * buggy behaviour, a system can be alive and keep
 * alive using this function.
 */
1119 static __always_inline struct vmap_area *
1120 __merge_or_add_vmap_area(struct vmap_area *va,
1121 struct rb_root *root, struct list_head *head, bool augment)
1123 struct vmap_area *sibling;
1124 struct list_head *next;
1125 struct rb_node **link;
1126 struct rb_node *parent;
1127 bool merged = false;
1130 * Find a place in the tree where VA potentially will be
1131 * inserted, unless it is merged with its sibling/siblings.
1133 link = find_va_links(va, root, NULL, &parent);
1138 * Get next node of VA to check if merging can be done.
1140 next = get_va_next_sibling(parent, link);
1141 if (unlikely(next == NULL))
1147 * |<------VA------>|<-----Next----->|
1152 sibling = list_entry(next, struct vmap_area, list);
1153 if (sibling->va_start == va->va_end) {
1154 sibling->va_start = va->va_start;
1156 /* Free vmap_area object. */
1157 kmem_cache_free(vmap_area_cachep, va);
1159 /* Point to the new merged area. */
1168 * |<-----Prev----->|<------VA------>|
1172 if (next->prev != head) {
1173 sibling = list_entry(next->prev, struct vmap_area, list);
1174 if (sibling->va_end == va->va_start) {
1176 * If both neighbors are coalesced, it is important
1177 * to unlink the "next" node first, followed by merging
1178 * with "previous" one. Otherwise the tree might not be
1179 * fully populated if a sibling's augmented value is
1180 * "normalized" because of rotation operations.
1183 __unlink_va(va, root, augment);
1185 sibling->va_end = va->va_end;
1187 /* Free vmap_area object. */
1188 kmem_cache_free(vmap_area_cachep, va);
1190 /* Point to the new merged area. */
1198 __link_va(va, root, parent, link, head, augment);
1203 static __always_inline struct vmap_area *
1204 merge_or_add_vmap_area(struct vmap_area *va,
1205 struct rb_root *root, struct list_head *head)
1207 return __merge_or_add_vmap_area(va, root, head, false);
1210 static __always_inline struct vmap_area *
1211 merge_or_add_vmap_area_augment(struct vmap_area *va,
1212 struct rb_root *root, struct list_head *head)
1214 va = __merge_or_add_vmap_area(va, root, head, true);
1216 augment_tree_propagate_from(va);
1221 static __always_inline bool
1222 is_within_this_va(struct vmap_area *va, unsigned long size,
1223 unsigned long align, unsigned long vstart)
1225 unsigned long nva_start_addr;
1227 if (va->va_start > vstart)
1228 nva_start_addr = ALIGN(va->va_start, align);
1230 nva_start_addr = ALIGN(vstart, align);
1232 /* Can be overflowed due to big size or alignment. */
1233 if (nva_start_addr + size < nva_start_addr ||
1234 nva_start_addr < vstart)
1237 return (nva_start_addr + size <= va->va_end);
/*
 * Find the first free block(lowest start address) in the tree,
 * that will accomplish the request corresponding to passing
 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
 * a search length is adjusted to account for worst case alignment
 * overhead.
 */
1247 static __always_inline struct vmap_area *
1248 find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1249 unsigned long align, unsigned long vstart, bool adjust_search_size)
1251 struct vmap_area *va;
1252 struct rb_node *node;
1253 unsigned long length;
1255 /* Start from the root. */
1256 node = root->rb_node;
1258 /* Adjust the search size for alignment overhead. */
1259 length = adjust_search_size ? size + align - 1 : size;
1262 va = rb_entry(node, struct vmap_area, rb_node);
1264 if (get_subtree_max_size(node->rb_left) >= length &&
1265 vstart < va->va_start) {
1266 node = node->rb_left;
1268 if (is_within_this_va(va, size, align, vstart))
1272 * Does not make sense to go deeper towards the right
1273 * sub-tree if it does not have a free block that is
1274 * equal or bigger to the requested search length.
1276 if (get_subtree_max_size(node->rb_right) >= length) {
1277 node = node->rb_right;
1282 * OK. We roll back and find the first right sub-tree,
1283 * that will satisfy the search criteria. It can happen
1284 * due to "vstart" restriction or an alignment overhead
1285 * that is bigger then PAGE_SIZE.
1287 while ((node = rb_parent(node))) {
1288 va = rb_entry(node, struct vmap_area, rb_node);
1289 if (is_within_this_va(va, size, align, vstart))
1292 if (get_subtree_max_size(node->rb_right) >= length &&
1293 vstart <= va->va_start) {
1295 * Shift the vstart forward. Please note, we update it with
1296 * parent's start address adding "1" because we do not want
1297 * to enter same sub-tree after it has already been checked
1298 * and no suitable free block found there.
1300 vstart = va->va_start + 1;
1301 node = node->rb_right;
1311 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1312 #include <linux/random.h>
1314 static struct vmap_area *
1315 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1316 unsigned long align, unsigned long vstart)
1318 struct vmap_area *va;
1320 list_for_each_entry(va, head, list) {
1321 if (!is_within_this_va(va, size, align, vstart))
1331 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1332 unsigned long size, unsigned long align)
1334 struct vmap_area *va_1, *va_2;
1335 unsigned long vstart;
1338 get_random_bytes(&rnd, sizeof(rnd));
1339 vstart = VMALLOC_START + rnd;
1341 va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1342 va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1345 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1346 va_1, va_2, vstart);
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};
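/*
 * Worked example (illustrative): for a free vmap_area covering [10, 20),
 * classify_va_fit_type() below returns:
 *  - FL_FIT_TYPE for a request [10, 20) - the whole block is consumed;
 *  - LE_FIT_TYPE for a request [10, 14) - the block shrinks from the left;
 *  - RE_FIT_TYPE for a request [16, 20) - the block shrinks from the right;
 *  - NE_FIT_TYPE for a request [13, 17) - the block is split in two.
 */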
1358 static __always_inline enum fit_type
1359 classify_va_fit_type(struct vmap_area *va,
1360 unsigned long nva_start_addr, unsigned long size)
1364 /* Check if it is within VA. */
1365 if (nva_start_addr < va->va_start ||
1366 nva_start_addr + size > va->va_end)
1370 if (va->va_start == nva_start_addr) {
1371 if (va->va_end == nva_start_addr + size)
1375 } else if (va->va_end == nva_start_addr + size) {
1384 static __always_inline int
1385 adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
1386 struct vmap_area *va, unsigned long nva_start_addr,
1389 struct vmap_area *lva = NULL;
1390 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1392 if (type == FL_FIT_TYPE) {
1394 * No need to split VA, it fully fits.
1400 unlink_va_augment(va, root);
1401 kmem_cache_free(vmap_area_cachep, va);
1402 } else if (type == LE_FIT_TYPE) {
1404 * Split left edge of fit VA.
1410 va->va_start += size;
1411 } else if (type == RE_FIT_TYPE) {
1413 * Split right edge of fit VA.
1419 va->va_end = nva_start_addr;
1420 } else if (type == NE_FIT_TYPE) {
1422 * Split no edge of fit VA.
1428 lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1429 if (unlikely(!lva)) {
1431 * For percpu allocator we do not do any pre-allocation
1432 * and leave it as it is. The reason is it most likely
1433 * never ends up with NE_FIT_TYPE splitting. In case of
1434 * percpu allocations offsets and sizes are aligned to
1435 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1436 * are its main fitting cases.
1438 * There are a few exceptions though, as an example it is
1439 * a first allocation (early boot up) when we have "one"
1440 * big free space that has to be split.
1442 * Also we can hit this path in case of regular "vmap"
1443 * allocations, if "this" current CPU was not preloaded.
1444 * See the comment in alloc_vmap_area() why. If so, then
1445 * GFP_NOWAIT is used instead to get an extra object for
1446 * split purpose. That is rare and most time does not
1449 * What happens if an allocation gets failed. Basically,
1450 * an "overflow" path is triggered to purge lazily freed
1451 * areas to free some memory, then, the "retry" path is
1452 * triggered to repeat one more time. See more details
1453 * in alloc_vmap_area() function.
1455 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1461 * Build the remainder.
1463 lva->va_start = va->va_start;
1464 lva->va_end = nva_start_addr;
1467 * Shrink this VA to remaining size.
1469 va->va_start = nva_start_addr + size;
1474 if (type != FL_FIT_TYPE) {
1475 augment_tree_propagate_from(va);
1477 if (lva) /* type == NE_FIT_TYPE */
1478 insert_vmap_area_augment(lva, &va->rb_node, root, head);
/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise a vend is returned that indicates failure.
 */
1488 static __always_inline unsigned long
1489 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1490 unsigned long size, unsigned long align,
1491 unsigned long vstart, unsigned long vend)
1493 bool adjust_search_size = true;
1494 unsigned long nva_start_addr;
1495 struct vmap_area *va;
1499 * Do not adjust when:
1500 * a) align <= PAGE_SIZE, because it does not make any sense.
1501 * All blocks(their start addresses) are at least PAGE_SIZE
1503 * b) a short range where a requested size corresponds to exactly
1504 * specified [vstart:vend] interval and an alignment > PAGE_SIZE.
1505 * With adjusted search length an allocation would not succeed.
1507 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1508 adjust_search_size = false;
1510 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1514 if (va->va_start > vstart)
1515 nva_start_addr = ALIGN(va->va_start, align);
1517 nva_start_addr = ALIGN(vstart, align);
1519 /* Check the "vend" restriction. */
1520 if (nva_start_addr + size > vend)
1523 /* Update the free vmap_area. */
1524 ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
1525 if (WARN_ON_ONCE(ret))
1528 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1529 find_vmap_lowest_match_check(root, head, size, align);
1532 return nva_start_addr;
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}
1556 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1558 struct vmap_area *va = NULL;
1561 * Preload this CPU with one extra vmap_area object. It is used
1562 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1563 * a CPU that does an allocation is preloaded.
1565 * We do it in non-atomic context, thus it allows us to use more
1566 * permissive allocation masks to be more stable under low memory
1567 * condition and high memory pressure.
1569 if (!this_cpu_read(ne_fit_preload_node))
1570 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1574 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1575 kmem_cache_free(vmap_area_cachep, va);
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
1582 static struct vmap_area *alloc_vmap_area(unsigned long size,
1583 unsigned long align,
1584 unsigned long vstart, unsigned long vend,
1585 int node, gfp_t gfp_mask,
1586 unsigned long va_flags)
1588 struct vmap_area *va;
1589 unsigned long freed;
1594 if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
1595 return ERR_PTR(-EINVAL);
1597 if (unlikely(!vmap_initialized))
1598 return ERR_PTR(-EBUSY);
1601 gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1603 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1605 return ERR_PTR(-ENOMEM);
1608 * Only scan the relevant parts containing pointers to other objects
1609 * to avoid false negatives.
1611 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1614 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1615 addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1616 size, align, vstart, vend);
1617 spin_unlock(&free_vmap_area_lock);
1619 trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1622 * If an allocation fails, the "vend" address is
1623 * returned. Therefore trigger the overflow path.
1625 if (unlikely(addr == vend))
1628 va->va_start = addr;
1629 va->va_end = addr + size;
1631 va->flags = va_flags;
1633 spin_lock(&vmap_area_lock);
1634 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1635 spin_unlock(&vmap_area_lock);
1637 BUG_ON(!IS_ALIGNED(va->va_start, align));
1638 BUG_ON(va->va_start < vstart);
1639 BUG_ON(va->va_end > vend);
1641 ret = kasan_populate_vmalloc(addr, size);
1644 return ERR_PTR(ret);
1651 reclaim_and_purge_vmap_areas();
1657 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1664 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1665 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1668 kmem_cache_free(vmap_area_cachep, va);
1669 return ERR_PTR(-EBUSY);
1672 int register_vmap_purge_notifier(struct notifier_block *nb)
1674 return blocking_notifier_chain_register(&vmap_notify_list, nb);
1676 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1678 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1680 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1682 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
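/*
 * Worked example (illustrative, assuming 4 KiB pages and 16 online CPUs):
 * fls(16) == 5, so lazy_max_pages() == 5 * (32 MiB / 4 KiB) == 40960 pages,
 * i.e. up to 160 MiB of lazily freed space may accumulate before a purge
 * and TLB flush is triggered (see free_vmap_area_noflush()).
 */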
1709 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);
1718 /* for per-CPU blocks */
1719 static void purge_fragmented_blocks_allcpus(void);
/*
 * Purges all lazily-freed vmap areas.
 */
1724 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1726 unsigned long resched_threshold;
1727 unsigned int num_purged_areas = 0;
1728 struct list_head local_purge_list;
1729 struct vmap_area *va, *n_va;
1731 lockdep_assert_held(&vmap_purge_lock);
1733 spin_lock(&purge_vmap_area_lock);
1734 purge_vmap_area_root = RB_ROOT;
1735 list_replace_init(&purge_vmap_area_list, &local_purge_list);
1736 spin_unlock(&purge_vmap_area_lock);
1738 if (unlikely(list_empty(&local_purge_list)))
1742 list_first_entry(&local_purge_list,
1743 struct vmap_area, list)->va_start);
1746 list_last_entry(&local_purge_list,
1747 struct vmap_area, list)->va_end);
1749 flush_tlb_kernel_range(start, end);
1750 resched_threshold = lazy_max_pages() << 1;
1752 spin_lock(&free_vmap_area_lock);
1753 list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1754 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1755 unsigned long orig_start = va->va_start;
1756 unsigned long orig_end = va->va_end;
1759 * Finally insert or merge lazily-freed area. It is
1760 * detached and there is no need to "unlink" it from
1763 va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1764 &free_vmap_area_list);
1769 if (is_vmalloc_or_module_addr((void *)orig_start))
1770 kasan_release_vmalloc(orig_start, orig_end,
1771 va->va_start, va->va_end);
1773 atomic_long_sub(nr, &vmap_lazy_nr);
1776 if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1777 cond_resched_lock(&free_vmap_area_lock);
1779 spin_unlock(&free_vmap_area_lock);
1782 trace_purge_vmap_area_lazy(start, end, num_purged_areas);
1783 return num_purged_areas > 0;
/*
 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
 */
static void reclaim_and_purge_vmap_areas(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}
static void drain_vmap_area_work(struct work_struct *work)
{
	unsigned long nr_lazy;

	do {
		mutex_lock(&vmap_purge_lock);
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);

		/* Recheck if further work is required. */
		nr_lazy = atomic_long_read(&vmap_lazy_nr);
	} while (nr_lazy > lazy_max_pages());
}
/*
 * Free a vmap area, caller ensuring that the area has been unmapped,
 * unlinked and flush_cache_vunmap had been called for the correct
 * range previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy_max = lazy_max_pages();
	unsigned long va_start = va->va_start;
	unsigned long nr_lazy;

	if (WARN_ON_ONCE(!list_empty(&va->list)))
		return;

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/*
	 * Merge or place it to the purge tree/list.
	 */
	spin_lock(&purge_vmap_area_lock);
	merge_or_add_vmap_area(va,
		&purge_vmap_area_root, &purge_vmap_area_list);
	spin_unlock(&purge_vmap_area_lock);

	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);

	/* After this point, we may free va at any time */
	if (unlikely(nr_lazy > nr_lazy_max))
		schedule_work(&drain_vmap_work);
}
/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	vunmap_range_noflush(va->va_start, va->va_end);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}
1857 struct vmap_area *find_vmap_area(unsigned long addr)
1859 struct vmap_area *va;
1861 spin_lock(&vmap_area_lock);
1862 va = __find_vmap_area(addr, &vmap_area_root);
1863 spin_unlock(&vmap_area_lock);
1868 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
1870 struct vmap_area *va;
1872 spin_lock(&vmap_area_lock);
1873 va = __find_vmap_area(addr, &vmap_area_root);
1875 unlink_va(va, &vmap_area_root);
1876 spin_unlock(&vmap_area_lock);
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
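/*
 * Worked example (illustrative, assuming a 64-bit kernel, 4 KiB pages and
 * NR_CPUS == 64): VMALLOC_PAGES == 128 GiB / 4 KiB == 33554432, so
 * VMALLOC_PAGES / 64 / 16 == 32768, which VMAP_MIN() clamps to
 * VMAP_BBMAP_BITS_MAX == 1024. VMAP_BLOCK_SIZE is then 1024 * 4 KiB == 4 MiB.
 */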
/*
 * Purge threshold to prevent overeager purging of fragmented blocks for
 * regular operations: Purge if vb->free is less than 1/4 of the capacity.
 */
#define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)

#define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
#define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
#define VMAP_FLAGS_MASK		0x3
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;

	/*
	 * An xarray requires an extra memory dynamically to
	 * be allocated. If it is an issue, we can use rb-tree
	 * instead.
	 */
	struct xarray vmap_blocks;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * In order to fast access to any "vmap_block" associated with a
 * specific address, we use a hash.
 *
 * A per-cpu vmap_block_queue is used in both ways, to serialize
 * an access to free block chains among CPUs(alloc path) and it
 * also acts as a vmap_block hash(alloc/free paths). It means we
 * overload it, since we already have the per-cpu array which is
 * used as a hash table. When used as a hash a 'cpu' passed to
 * per_cpu() is not actually a CPU but rather a hash index.
 *
 * A hash function is addr_to_vb_xa() which hashes any address
 * to a specific index(in a hash) it belongs to. This then uses a
 * per_cpu() macro to access an array with generated index.
 *
 * An example:
 *
 * 0     10     20     30     40     50     60
 * |------|------|------|------|------|------|...<vmap address space>
 *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
 *
 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
 *   it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
 *
 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
 *   it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
 *
 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
 *   it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
 *
 * This technique almost always avoids lock contention on insert/remove,
 * however xarray spinlocks protect against any contention that remains.
 */
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();

	return &per_cpu(vmap_block_queue, index).vmap_blocks;
}
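/*
 * Worked example (illustrative): with VMAP_BLOCK_SIZE == 4 MiB and three
 * possible CPUs, any address that lies in the N-th 4 MiB chunk of the
 * address space hashes to index N % 3, no matter which CPU does the call.
 * All lookups and frees of a given block therefore contend only on the one
 * xarray that addr_to_vb_xa() picks for it.
 */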
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}
/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *	block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order: how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
2022 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2024 struct vmap_block_queue *vbq;
2025 struct vmap_block *vb;
2026 struct vmap_area *va;
2028 unsigned long vb_idx;
2032 node = numa_node_id();
2034 vb = kmalloc_node(sizeof(struct vmap_block),
2035 gfp_mask & GFP_RECLAIM_MASK, node);
2037 return ERR_PTR(-ENOMEM);
2039 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2040 VMALLOC_START, VMALLOC_END,
2042 VMAP_RAM|VMAP_BLOCK);
2045 return ERR_CAST(va);
2048 vaddr = vmap_block_vaddr(va->va_start, 0);
2049 spin_lock_init(&vb->lock);
2051 /* At least something should be left free */
2052 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2053 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2054 vb->free = VMAP_BBMAP_BITS - (1UL << order);
2056 vb->dirty_min = VMAP_BBMAP_BITS;
2058 bitmap_set(vb->used_map, 0, (1UL << order));
2059 INIT_LIST_HEAD(&vb->free_list);
2061 xa = addr_to_vb_xa(va->va_start);
2062 vb_idx = addr_to_vb_idx(va->va_start);
2063 err = xa_insert(xa, vb_idx, vb, gfp_mask);
2067 return ERR_PTR(err);
2070 vbq = raw_cpu_ptr(&vmap_block_queue);
2071 spin_lock(&vbq->lock);
2072 list_add_tail_rcu(&vb->free_list, &vbq->free);
2073 spin_unlock(&vbq->lock);
2078 static void free_vmap_block(struct vmap_block *vb)
2080 struct vmap_block *tmp;
2083 xa = addr_to_vb_xa(vb->va->va_start);
2084 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2087 spin_lock(&vmap_area_lock);
2088 unlink_va(vb->va, &vmap_area_root);
2089 spin_unlock(&vmap_area_lock);
2091 free_vmap_area_noflush(vb->va);
2092 kfree_rcu(vb, rcu_head);
2095 static bool purge_fragmented_block(struct vmap_block *vb,
2096 struct vmap_block_queue *vbq, struct list_head *purge_list,
2099 if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2100 vb->dirty == VMAP_BBMAP_BITS)
2103 /* Don't overeagerly purge usable blocks unless requested */
2104 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2107 /* prevent further allocs after releasing lock */
2108 WRITE_ONCE(vb->free, 0);
2109 /* prevent purging it again */
2110 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2112 vb->dirty_max = VMAP_BBMAP_BITS;
2113 spin_lock(&vbq->lock);
2114 list_del_rcu(&vb->free_list);
2115 spin_unlock(&vbq->lock);
2116 list_add_tail(&vb->purge, purge_list);
2120 static void free_purged_blocks(struct list_head *purge_list)
2122 struct vmap_block *vb, *n_vb;
2124 list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2125 list_del(&vb->purge);
2126 free_vmap_block(vb);
2130 static void purge_fragmented_blocks(int cpu)
2133 struct vmap_block *vb;
2134 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2137 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2138 unsigned long free = READ_ONCE(vb->free);
2139 unsigned long dirty = READ_ONCE(vb->dirty);
2141 if (free + dirty != VMAP_BBMAP_BITS ||
2142 dirty == VMAP_BBMAP_BITS)
2145 spin_lock(&vb->lock);
2146 purge_fragmented_block(vb, vbq, &purge, true);
2147 spin_unlock(&vb->lock);
2150 free_purged_blocks(&purge);
2153 static void purge_fragmented_blocks_allcpus(void)
2157 for_each_possible_cpu(cpu)
2158 purge_fragmented_blocks(cpu);
2161 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2163 struct vmap_block_queue *vbq;
2164 struct vmap_block *vb;
2168 BUG_ON(offset_in_page(size));
2169 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2170 if (WARN_ON(size == 0)) {
2172 * Allocating 0 bytes isn't what caller wants since
2173 * get_order(0) returns funny result. Just warn and terminate
2178 order = get_order(size);
2181 vbq = raw_cpu_ptr(&vmap_block_queue);
2182 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2183 unsigned long pages_off;
2185 if (READ_ONCE(vb->free) < (1UL << order))
2188 spin_lock(&vb->lock);
2189 if (vb->free < (1UL << order)) {
2190 spin_unlock(&vb->lock);
2194 pages_off = VMAP_BBMAP_BITS - vb->free;
2195 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2196 WRITE_ONCE(vb->free, vb->free - (1UL << order));
2197 bitmap_set(vb->used_map, pages_off, (1UL << order));
2198 if (vb->free == 0) {
2199 spin_lock(&vbq->lock);
2200 list_del_rcu(&vb->free_list);
2201 spin_unlock(&vbq->lock);
2204 spin_unlock(&vb->lock);
2210 /* Allocate new block if nothing was found */
2212 vaddr = new_vmap_block(order, gfp_mask);
2217 static void vb_free(unsigned long addr, unsigned long size)
2219 unsigned long offset;
2221 struct vmap_block *vb;
2224 BUG_ON(offset_in_page(size));
2225 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2227 flush_cache_vunmap(addr, addr + size);
2229 order = get_order(size);
2230 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2232 xa = addr_to_vb_xa(addr);
2233 vb = xa_load(xa, addr_to_vb_idx(addr));
2235 spin_lock(&vb->lock);
2236 bitmap_clear(vb->used_map, offset, (1UL << order));
2237 spin_unlock(&vb->lock);
2239 vunmap_range_noflush(addr, addr + size);
2241 if (debug_pagealloc_enabled_static())
2242 flush_tlb_kernel_range(addr, addr + size);
2244 spin_lock(&vb->lock);
2246 /* Expand the not yet TLB flushed dirty range */
2247 vb->dirty_min = min(vb->dirty_min, offset);
2248 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2250 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2251 if (vb->dirty == VMAP_BBMAP_BITS) {
2253 spin_unlock(&vb->lock);
2254 free_vmap_block(vb);
2256 spin_unlock(&vb->lock);
2259 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2261 LIST_HEAD(purge_list);
2264 if (unlikely(!vmap_initialized))
2267 mutex_lock(&vmap_purge_lock);
2269 for_each_possible_cpu(cpu) {
2270 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2271 struct vmap_block *vb;
2275 xa_for_each(&vbq->vmap_blocks, idx, vb) {
2276 spin_lock(&vb->lock);
2279 * Try to purge a fragmented block first. If it's
2280 * not purgeable, check whether there is dirty
2281 * space to be flushed.
2283 if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
2284 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2285 unsigned long va_start = vb->va->va_start;
2288 s = va_start + (vb->dirty_min << PAGE_SHIFT);
2289 e = va_start + (vb->dirty_max << PAGE_SHIFT);
2291 start = min(s, start);
2294 /* Prevent that this is flushed again */
2295 vb->dirty_min = VMAP_BBMAP_BITS;
2300 spin_unlock(&vb->lock);
2304 free_purged_blocks(&purge_list);
2306 if (!__purge_vmap_area_lazy(start, end) && flush)
2307 flush_tlb_kernel_range(start, end);
2308 mutex_unlock(&vmap_purge_lock);
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(addr, size);
		return;
	}

	va = find_unlink_vmap_area(addr);
	if (WARN_ON_ONCE(!va))
		return;

	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
2369 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2370 * @pages: an array of pointers to the pages to be mapped
2371 * @count: number of pages
2372 * @node: prefer to allocate data structures on this node
2374 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2375 * faster than vmap(). But if you mix long-lived and short-lived
2376 * objects with vm_map_ram(), it can consume a lot of address space through
2377 * fragmentation (especially on a 32bit machine), and you may eventually see
2378 * allocation failures. Please use this function only for short-lived objects.
2380 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2382 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2384 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2388 if (likely(count <= VMAP_MAX_ALLOC)) {
2389 mem = vb_alloc(size, GFP_KERNEL);
2392 addr = (unsigned long)mem;
2394 struct vmap_area *va;
2395 va = alloc_vmap_area(size, PAGE_SIZE,
2396 VMALLOC_START, VMALLOC_END,
2397 node, GFP_KERNEL, VMAP_RAM);
2401 addr = va->va_start;
2405 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2406 pages, PAGE_SHIFT) < 0) {
2407 vm_unmap_ram(mem, count);
2412 * Mark the pages as accessible, now that they are mapped.
2413 * With hardware tag-based KASAN, marking is skipped for
2414 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2416 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2420 EXPORT_SYMBOL(vm_map_ram);
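/*
 * Illustrative sketch (not part of this file): a typical short-lived pairing
 * of vm_map_ram()/vm_unmap_ram(). The names my_pages, my_nr and my_fill()
 * are hypothetical.
 *
 *	void *va = vm_map_ram(my_pages, my_nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	my_fill(va, (unsigned long)my_nr << PAGE_SHIFT);
 *	vm_unmap_ram(va, my_nr);	// must pass back the same count
 */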
2422 static struct vm_struct *vmlist __initdata;
2424 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2426 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2427 return vm->page_order;
2433 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2435 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2436 vm->page_order = order;
2443 * vm_area_add_early - add vmap area early during boot
2444 * @vm: vm_struct to add
2446 * This function is used to add fixed kernel vm area to vmlist before
2447 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2448 * should contain proper values and the other fields should be zero.
2450 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2452 void __init vm_area_add_early(struct vm_struct *vm)
2454 struct vm_struct *tmp, **p;
2456 BUG_ON(vmap_initialized);
2457 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2458 if (tmp->addr >= vm->addr) {
2459 BUG_ON(tmp->addr < vm->addr + vm->size);
2462 BUG_ON(tmp->addr + tmp->size > vm->addr);
2469 * vm_area_register_early - register vmap area early during boot
2470 * @vm: vm_struct to register
2471 * @align: requested alignment
2473 * This function is used to register kernel vm area before
2474 * vmalloc_init() is called. @vm->size and @vm->flags should contain
2475 * proper values on entry and other fields should be zero. On return,
2476 * vm->addr contains the allocated address.
2478 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2480 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2482 unsigned long addr = ALIGN(VMALLOC_START, align);
2483 struct vm_struct *cur, **p;
2485 BUG_ON(vmap_initialized);
2487 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
2488 if ((unsigned long)cur->addr - addr >= vm->size)
2490 addr = ALIGN((unsigned long)cur->addr + cur->size, align);
2493 BUG_ON(addr > VMALLOC_END - vm->size);
2494 vm->addr = (void *)addr;
2497 kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2500 static void vmap_init_free_space(void)
2502 unsigned long vmap_start = 1;
2503 const unsigned long vmap_end = ULONG_MAX;
2504 struct vmap_area *busy, *free;
2508 * -|-----|.....|-----|-----|-----|.....|-
2510 * |<--------------------------------->|
2512 list_for_each_entry(busy, &vmap_area_list, list) {
2513 if (busy->va_start - vmap_start > 0) {
2514 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2515 if (!WARN_ON_ONCE(!free)) {
2516 free->va_start = vmap_start;
2517 free->va_end = busy->va_start;
2519 insert_vmap_area_augment(free, NULL,
2520 &free_vmap_area_root,
2521 &free_vmap_area_list);
2525 vmap_start = busy->va_end;
2528 if (vmap_end - vmap_start > 0) {
2529 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2530 if (!WARN_ON_ONCE(!free)) {
2531 free->va_start = vmap_start;
2532 free->va_end = vmap_end;
2534 insert_vmap_area_augment(free, NULL,
2535 &free_vmap_area_root,
2536 &free_vmap_area_list);
2541 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2542 struct vmap_area *va, unsigned long flags, const void *caller)
2545 vm->addr = (void *)va->va_start;
2546 vm->size = va->va_end - va->va_start;
2547 vm->caller = caller;
2551 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2552 unsigned long flags, const void *caller)
2554 spin_lock(&vmap_area_lock);
2555 setup_vmalloc_vm_locked(vm, va, flags, caller);
2556 spin_unlock(&vmap_area_lock);
2559 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2562 * Before removing VM_UNINITIALIZED,
2563 * we should make sure that vm has proper values.
2564 * Pair with smp_rmb() in show_numa_info().
2567 vm->flags &= ~VM_UNINITIALIZED;
2570 static struct vm_struct *__get_vm_area_node(unsigned long size,
2571 unsigned long align, unsigned long shift, unsigned long flags,
2572 unsigned long start, unsigned long end, int node,
2573 gfp_t gfp_mask, const void *caller)
2575 struct vmap_area *va;
2576 struct vm_struct *area;
2577 unsigned long requested_size = size;
2579 BUG_ON(in_interrupt());
2580 size = ALIGN(size, 1ul << shift);
2581 if (unlikely(!size))
2584 if (flags & VM_IOREMAP)
2585 align = 1ul << clamp_t(int, get_count_order_long(size),
2586 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2588 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2589 if (unlikely(!area))
2592 if (!(flags & VM_NO_GUARD))
2595 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2601 setup_vmalloc_vm(area, va, flags, caller);
2604 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2605 * best-effort approach, as they can be mapped outside of vmalloc code.
2606 * For VM_ALLOC mappings, the pages are marked as accessible after
2607 * getting mapped in __vmalloc_node_range().
2608 * With hardware tag-based KASAN, marking is skipped for
2609 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2611 if (!(flags & VM_ALLOC))
2612 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2613 KASAN_VMALLOC_PROT_NORMAL);
2618 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2619 unsigned long start, unsigned long end,
2622 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2623 NUMA_NO_NODE, GFP_KERNEL, caller);
2627 * get_vm_area - reserve a contiguous kernel virtual area
2628 * @size: size of the area
2629 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
2631 * Search an area of @size in the kernel virtual mapping area,
2632 * and reserve it for our purposes. Returns the area descriptor
2633 * on success or %NULL on failure.
2635 * Return: the area descriptor on success or %NULL on failure.
2637 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2639 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2640 VMALLOC_START, VMALLOC_END,
2641 NUMA_NO_NODE, GFP_KERNEL,
2642 __builtin_return_address(0));
2645 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2648 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2649 VMALLOC_START, VMALLOC_END,
2650 NUMA_NO_NODE, GFP_KERNEL, caller);
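/*
 * Illustrative sketch (not part of this file): an ioremap-style user reserves
 * virtual space with get_vm_area_caller() and then maps a physical range into
 * it. phys and size are hypothetical; error handling is abbreviated.
 *
 *	struct vm_struct *area;
 *	unsigned long vaddr;
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP,
 *				  __builtin_return_address(0));
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys, PAGE_KERNEL)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return area->addr;
 */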
2654 * find_vm_area - find a continuous kernel virtual area
2655 * @addr: base address
2657 * Search for the kernel VM area starting at @addr, and return it.
2658 * It is up to the caller to do all required locking to keep the returned
2661 * Return: the area descriptor on success or %NULL on failure.
2663 struct vm_struct *find_vm_area(const void *addr)
2665 struct vmap_area *va;
2667 va = find_vmap_area((unsigned long)addr);
2675 * remove_vm_area - find and remove a continuous kernel virtual area
2676 * @addr: base address
2678 * Search for the kernel VM area starting at @addr, and remove it.
2679 * This function returns the found VM area, but using it is NOT safe
2680 * on SMP machines, except for its size or flags.
2682 * Return: the area descriptor on success or %NULL on failure.
2684 struct vm_struct *remove_vm_area(const void *addr)
2686 struct vmap_area *va;
2687 struct vm_struct *vm;
2691 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2695 va = find_unlink_vmap_area((unsigned long)addr);
2700 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
2701 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
2702 kasan_free_module_shadow(vm);
2703 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
2705 free_unmap_vmap_area(va);
2709 static inline void set_area_direct_map(const struct vm_struct *area,
2710 int (*set_direct_map)(struct page *page))
2714 /* HUGE_VMALLOC passes small pages to set_direct_map */
2715 for (i = 0; i < area->nr_pages; i++)
2716 if (page_address(area->pages[i]))
2717 set_direct_map(area->pages[i]);
2721 * Flush the vm mapping and reset the direct map.
2723 static void vm_reset_perms(struct vm_struct *area)
2725 unsigned long start = ULONG_MAX, end = 0;
2726 unsigned int page_order = vm_area_page_order(area);
2731 * Find the start and end range of the direct mappings to make sure that
2732 * the vm_unmap_aliases() flush includes the direct map.
2734 for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2735 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2738 unsigned long page_size;
2740 page_size = PAGE_SIZE << page_order;
2741 start = min(addr, start);
2742 end = max(addr + page_size, end);
2748 * Set direct map to something invalid so that it won't be cached if
2749 * there are any accesses after the TLB flush, then flush the TLB and
2750 * reset the direct map permissions to the default.
2752 set_area_direct_map(area, set_direct_map_invalid_noflush);
2753 _vm_unmap_aliases(start, end, flush_dmap);
2754 set_area_direct_map(area, set_direct_map_default_noflush);
2757 static void delayed_vfree_work(struct work_struct *w)
2759 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
2760 struct llist_node *t, *llnode;
2762 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
2767 * vfree_atomic - release memory allocated by vmalloc()
2768 * @addr: memory base address
2770 * This one is just like vfree() but can be called in any atomic context
2773 void vfree_atomic(const void *addr)
2775 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2778 kmemleak_free(addr);
2781 * Use raw_cpu_ptr() because this can be called from preemptible
2782 * context. Preemption is absolutely fine here, because the llist_add()
2783 * implementation is lockless, so it works even if we are adding to
2784 * another cpu's list. schedule_work() should be fine with this too.
2786 if (addr && llist_add((struct llist_node *)addr, &p->list))
2787 schedule_work(&p->wq);
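/*
 * Illustrative sketch (not part of this file): freeing a vmalloc'ed buffer
 * from a context that may be atomic. my_buf is hypothetical.
 *
 *	if (in_interrupt())
 *		vfree_atomic(my_buf);	// defers the actual free to a workqueue
 *	else
 *		vfree(my_buf);
 */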
2791 * vfree - Release memory allocated by vmalloc()
2792 * @addr: Memory base address
2794 * Free the virtually contiguous memory area starting at @addr, as obtained
2795 * from one of the vmalloc() family of APIs. This will usually also free the
2796 * physical memory underlying the virtual allocation, but that memory is
2797 * reference counted, so it will not be freed until the last user goes away.
2799 * If @addr is NULL, no operation is performed.
2802 * May sleep if called *not* from interrupt context.
2803 * Must not be called in NMI context (strictly speaking, it could be
2804 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2805 * conventions for vfree() arch-dependent would be a really bad idea).
2807 void vfree(const void *addr)
2809 struct vm_struct *vm;
2812 if (unlikely(in_interrupt())) {
2818 kmemleak_free(addr);
2824 vm = remove_vm_area(addr);
2825 if (unlikely(!vm)) {
2826 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2831 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
2833 for (i = 0; i < vm->nr_pages; i++) {
2834 struct page *page = vm->pages[i];
2837 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
2839 * High-order allocs for huge vmallocs are split, so they
2840 * can be freed as an array of order-0 allocations
2845 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
2849 EXPORT_SYMBOL(vfree);
2852 * vunmap - release virtual mapping obtained by vmap()
2853 * @addr: memory base address
2855 * Free the virtually contiguous memory area starting at @addr,
2856 * which was created from the page array passed to vmap().
2858 * Must not be called in interrupt context.
2860 void vunmap(const void *addr)
2862 struct vm_struct *vm;
2864 BUG_ON(in_interrupt());
2869 vm = remove_vm_area(addr);
2870 if (unlikely(!vm)) {
2871 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
2877 EXPORT_SYMBOL(vunmap);
2880 * vmap - map an array of pages into virtually contiguous space
2881 * @pages: array of page pointers
2882 * @count: number of pages to map
2883 * @flags: vm_area->flags
2884 * @prot: page protection for the mapping
2886 * Maps @count pages from @pages into contiguous kernel virtual space.
2887 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2888 * (which must be kmalloc or vmalloc memory) and one reference per page in it
2889 * are transferred from the caller to vmap(), and will be freed / dropped when
2890 * vfree() is called on the return value.
2892 * Return: the address of the area or %NULL on failure
2894 void *vmap(struct page **pages, unsigned int count,
2895 unsigned long flags, pgprot_t prot)
2897 struct vm_struct *area;
2899 unsigned long size; /* In bytes */
2903 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
2907 * Your top guard is someone else's bottom guard. Not having a top
2908 * guard compromises someone else's mappings too.
2910 if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2911 flags &= ~VM_NO_GUARD;
2913 if (count > totalram_pages())
2916 size = (unsigned long)count << PAGE_SHIFT;
2917 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2921 addr = (unsigned long)area->addr;
2922 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2923 pages, PAGE_SHIFT) < 0) {
2928 if (flags & VM_MAP_PUT_PAGES) {
2929 area->pages = pages;
2930 area->nr_pages = count;
2934 EXPORT_SYMBOL(vmap);
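/*
 * Illustrative sketch (not part of this file): mapping an already allocated
 * page array into a contiguous kernel virtual range with vmap() and tearing
 * it down with vunmap(). my_pages/my_nr are hypothetical.
 *
 *	void *va = vmap(my_pages, my_nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memset(va, 0, (unsigned long)my_nr << PAGE_SHIFT);
 *	vunmap(va);		// the pages themselves are not freed
 */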
2936 #ifdef CONFIG_VMAP_PFN
2937 struct vmap_pfn_data {
2938 unsigned long *pfns;
2943 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2945 struct vmap_pfn_data *data = private;
2946 unsigned long pfn = data->pfns[data->idx];
2949 if (WARN_ON_ONCE(pfn_valid(pfn)))
2952 ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
2953 set_pte_at(&init_mm, addr, pte, ptent);
2960 * vmap_pfn - map an array of PFNs into virtually contiguous space
2961 * @pfns: array of PFNs
2962 * @count: number of pages to map
2963 * @prot: page protection for the mapping
2965 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2966 * the start address of the mapping.
2968 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2970 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2971 struct vm_struct *area;
2973 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2974 __builtin_return_address(0));
2977 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2978 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2983 flush_cache_vmap((unsigned long)area->addr,
2984 (unsigned long)area->addr + count * PAGE_SIZE);
2988 EXPORT_SYMBOL_GPL(vmap_pfn);
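/*
 * Illustrative sketch (not part of this file): mapping device memory that has
 * no struct pages (hence the WARN_ON(pfn_valid()) above) by PFN.
 * my_pfns/my_nr are hypothetical.
 *
 *	void *va = vmap_pfn(my_pfns, my_nr, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */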
2989 #endif /* CONFIG_VMAP_PFN */
2991 static inline unsigned int
2992 vm_area_alloc_pages(gfp_t gfp, int nid,
2993 unsigned int order, unsigned int nr_pages, struct page **pages)
2995 unsigned int nr_allocated = 0;
2996 gfp_t alloc_gfp = gfp;
2997 bool nofail = false;
3002 * For order-0 pages we make use of the bulk allocator. If
3003 * the page array ends up only partly populated (or not at all)
3004 * due to failures, we fall back to the single page allocator.
3008 /* bulk allocator doesn't officially support nofail requests */
3009 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
3011 while (nr_allocated < nr_pages) {
3012 unsigned int nr, nr_pages_request;
3015 * The maximum allowed request is hard-coded to 100
3016 * pages per call, in order to prevent a long
3017 * preemption-off section in the bulk allocator,
3018 * so the request range is [1:100].
3020 nr_pages_request = min(100U, nr_pages - nr_allocated);
3022 /* memory allocation should honour the mempolicy; we must not
3023 * blindly use the nearest node when nid == NUMA_NO_NODE,
3024 * otherwise memory may be allocated on only one node
3025 * even though the mempolicy asks for interleaving.
3027 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3028 nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
3030 pages + nr_allocated);
3033 nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
3035 pages + nr_allocated);
3041 * If no pages or only some of them were obtained,
3042 * fall back to the single page allocator.
3044 if (nr != nr_pages_request)
3047 } else if (gfp & __GFP_NOFAIL) {
3049 * Higher order nofail allocations are really expensive and
3050 * potentially dangerous (premature OOM, disruptive reclaim,
3051 * compaction etc.).
3053 alloc_gfp &= ~__GFP_NOFAIL;
3057 /* High-order pages or fallback path if "bulk" fails. */
3058 while (nr_allocated < nr_pages) {
3059 if (fatal_signal_pending(current))
3062 if (nid == NUMA_NO_NODE)
3063 page = alloc_pages(alloc_gfp, order);
3065 page = alloc_pages_node(nid, alloc_gfp, order);
3066 if (unlikely(!page)) {
3070 /* fall back to the zero order allocations */
3071 alloc_gfp |= __GFP_NOFAIL;
3077 * Higher order allocations must be able to be treated as
3078 * independent small pages by callers (as they can with
3079 * small-page vmallocs). Some drivers do their own refcounting
3080 * on vmalloc_to_page() pages, some use page->mapping,
3084 split_page(page, order);
3087 * Careful, we allocate and map page-order pages, but
3088 * tracking is done per PAGE_SIZE page so as to keep the
3089 * vm_struct APIs independent of the physical/mapped size.
3091 for (i = 0; i < (1U << order); i++)
3092 pages[nr_allocated + i] = page + i;
3095 nr_allocated += 1U << order;
3098 return nr_allocated;
3101 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3102 pgprot_t prot, unsigned int page_shift,
3105 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3106 bool nofail = gfp_mask & __GFP_NOFAIL;
3107 unsigned long addr = (unsigned long)area->addr;
3108 unsigned long size = get_vm_area_size(area);
3109 unsigned long array_size;
3110 unsigned int nr_small_pages = size >> PAGE_SHIFT;
3111 unsigned int page_order;
3115 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3117 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3118 gfp_mask |= __GFP_HIGHMEM;
3120 /* Please note that the recursion is strictly bounded. */
3121 if (array_size > PAGE_SIZE) {
3122 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
3125 area->pages = kmalloc_node(array_size, nested_gfp, node);
3129 warn_alloc(gfp_mask, NULL,
3130 "vmalloc error: size %lu, failed to allocated page array size %lu",
3131 nr_small_pages * PAGE_SIZE, array_size);
3136 set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3137 page_order = vm_area_page_order(area);
3139 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3140 node, page_order, nr_small_pages, area->pages);
3142 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3143 if (gfp_mask & __GFP_ACCOUNT) {
3146 for (i = 0; i < area->nr_pages; i++)
3147 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3151 * If not enough pages were obtained to satisfy the
3152 * allocation request, free the ones we did get (if any) via vfree().
3154 if (area->nr_pages != nr_small_pages) {
3156 * vm_area_alloc_pages() can fail due to insufficient memory but
3159 * - a pending fatal signal
3160 * - insufficient huge page-order pages
3162 * Since we always retry allocations at order-0 in the huge page
3163 * case a warning for either is spurious.
3165 if (!fatal_signal_pending(current) && page_order == 0)
3166 warn_alloc(gfp_mask, NULL,
3167 "vmalloc error: size %lu, failed to allocate pages",
3168 area->nr_pages * PAGE_SIZE);
3173 * page table allocations ignore the external gfp mask, enforce it
3176 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3177 flags = memalloc_nofs_save();
3178 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3179 flags = memalloc_noio_save();
3182 ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3184 if (nofail && (ret < 0))
3185 schedule_timeout_uninterruptible(1);
3186 } while (nofail && (ret < 0));
3188 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3189 memalloc_nofs_restore(flags);
3190 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3191 memalloc_noio_restore(flags);
3194 warn_alloc(gfp_mask, NULL,
3195 "vmalloc error: size %lu, failed to map pages",
3196 area->nr_pages * PAGE_SIZE);
3208 * __vmalloc_node_range - allocate virtually contiguous memory
3209 * @size: allocation size
3210 * @align: desired alignment
3211 * @start: vm area range start
3212 * @end: vm area range end
3213 * @gfp_mask: flags for the page level allocator
3214 * @prot: protection mask for the allocated pages
3215 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3216 * @node: node to use for allocation or NUMA_NO_NODE
3217 * @caller: caller's return address
3219 * Allocate enough pages to cover @size from the page level
3220 * allocator with @gfp_mask flags. Please note that the full set of gfp
3221 * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3223 * Zone modifiers are not supported. From the reclaim modifiers
3224 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3225 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3226 * __GFP_RETRY_MAYFAIL are not supported).
3228 * __GFP_NOWARN can be used to suppress failure messages.
3230 * Map them into contiguous kernel virtual space, using a pagetable
3231 * protection of @prot.
3233 * Return: the address of the area or %NULL on failure
3235 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3236 unsigned long start, unsigned long end, gfp_t gfp_mask,
3237 pgprot_t prot, unsigned long vm_flags, int node,
3240 struct vm_struct *area;
3242 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3243 unsigned long real_size = size;
3244 unsigned long real_align = align;
3245 unsigned int shift = PAGE_SHIFT;
3247 if (WARN_ON_ONCE(!size))
3250 if ((size >> PAGE_SHIFT) > totalram_pages()) {
3251 warn_alloc(gfp_mask, NULL,
3252 "vmalloc error: size %lu, exceeds total pages",
3257 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3258 unsigned long size_per_node;
3261 * Try huge pages. Only try for PAGE_KERNEL allocations,
3262 * others like modules don't yet expect huge pages in
3263 * their allocations due to apply_to_page_range not
3267 size_per_node = size;
3268 if (node == NUMA_NO_NODE)
3269 size_per_node /= num_online_nodes();
3270 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3273 shift = arch_vmap_pte_supported_shift(size_per_node);
3275 align = max(real_align, 1UL << shift);
3276 size = ALIGN(real_size, 1UL << shift);
3280 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3281 VM_UNINITIALIZED | vm_flags, start, end, node,
3284 bool nofail = gfp_mask & __GFP_NOFAIL;
3285 warn_alloc(gfp_mask, NULL,
3286 "vmalloc error: size %lu, vm_struct allocation failed%s",
3287 real_size, (nofail) ? ". Retrying." : "");
3289 schedule_timeout_uninterruptible(1);
3296 * Prepare arguments for __vmalloc_area_node() and
3297 * kasan_unpoison_vmalloc().
3299 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3300 if (kasan_hw_tags_enabled()) {
3302 * Modify protection bits to allow tagging.
3303 * This must be done before mapping.
3305 prot = arch_vmap_pgprot_tagged(prot);
3308 * Skip page_alloc poisoning and zeroing for physical
3309 * pages backing VM_ALLOC mapping. Memory is instead
3310 * poisoned and zeroed by kasan_unpoison_vmalloc().
3312 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3315 /* Take note that the mapping is PAGE_KERNEL. */
3316 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3319 /* Allocate physical pages and map them into vmalloc space. */
3320 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3325 * Mark the pages as accessible, now that they are mapped.
3326 * The condition for setting KASAN_VMALLOC_INIT should complement the
3327 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3328 * to make sure that memory is initialized under the same conditions.
3329 * Tag-based KASAN modes only assign tags to normal non-executable
3330 * allocations, see __kasan_unpoison_vmalloc().
3332 kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3333 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3334 (gfp_mask & __GFP_SKIP_ZERO))
3335 kasan_flags |= KASAN_VMALLOC_INIT;
3336 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3337 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3340 * The vm_struct allocated in this function has the VM_UNINITIALIZED
3341 * flag set, meaning it is not yet fully initialized.
3342 * At this point it is, so remove the flag here.
3344 clear_vm_uninitialized_flag(area);
3346 size = PAGE_ALIGN(size);
3347 if (!(vm_flags & VM_DEFER_KMEMLEAK))
3348 kmemleak_vmalloc(area, size, gfp_mask);
3353 if (shift > PAGE_SHIFT) {
3364 * __vmalloc_node - allocate virtually contiguous memory
3365 * @size: allocation size
3366 * @align: desired alignment
3367 * @gfp_mask: flags for the page level allocator
3368 * @node: node to use for allocation or NUMA_NO_NODE
3369 * @caller: caller's return address
3371 * Allocate enough pages to cover @size from the page level allocator with
3372 * @gfp_mask flags. Map them into contiguous kernel virtual space.
3374 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3375 * and __GFP_NOFAIL are not supported
3377 * Any use of gfp flags outside of GFP_KERNEL should be consulted
3380 * Return: pointer to the allocated memory or %NULL on error
3382 void *__vmalloc_node(unsigned long size, unsigned long align,
3383 gfp_t gfp_mask, int node, const void *caller)
3385 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3386 gfp_mask, PAGE_KERNEL, 0, node, caller);
3389 * This is only for performance analysis and stress testing of vmalloc.
3390 * It is required by the vmalloc test module, therefore do not use it for anything else.
3393 #ifdef CONFIG_TEST_VMALLOC_MODULE
3394 EXPORT_SYMBOL_GPL(__vmalloc_node);
3397 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3399 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3400 __builtin_return_address(0));
3402 EXPORT_SYMBOL(__vmalloc);
3405 * vmalloc - allocate virtually contiguous memory
3406 * @size: allocation size
3408 * Allocate enough pages to cover @size from the page level
3409 * allocator and map them into contiguous kernel virtual space.
3411 * For tight control over page level allocator and protection flags
3412 * use __vmalloc() instead.
3414 * Return: pointer to the allocated memory or %NULL on error
3416 void *vmalloc(unsigned long size)
3418 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3419 __builtin_return_address(0));
3421 EXPORT_SYMBOL(vmalloc);
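/*
 * Illustrative sketch (not part of this file): the common vmalloc()/vfree()
 * pattern for a large, virtually contiguous buffer that does not need to be
 * physically contiguous. my_size is hypothetical.
 *
 *	void *buf = vmalloc(my_size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */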
3424 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3425 * @size: allocation size
3426 * @gfp_mask: flags for the page level allocator
3428 * Allocate enough pages to cover @size from the page level
3429 * allocator and map them into contiguous kernel virtual space.
3430 * If @size is greater than or equal to PMD_SIZE, allow using
3431 * huge pages for the memory
3433 * Return: pointer to the allocated memory or %NULL on error
3435 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
3437 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3438 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3439 NUMA_NO_NODE, __builtin_return_address(0));
3441 EXPORT_SYMBOL_GPL(vmalloc_huge);
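/*
 * Illustrative sketch (not part of this file): callers that allocate
 * multi-megabyte tables (e.g. large hash tables) and can tolerate huge page
 * mappings may opt in via vmalloc_huge(). my_size is hypothetical.
 *
 *	void *table = vmalloc_huge(my_size, GFP_KERNEL);
 *
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */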
3444 * vzalloc - allocate virtually contiguous memory with zero fill
3445 * @size: allocation size
3447 * Allocate enough pages to cover @size from the page level
3448 * allocator and map them into contiguous kernel virtual space.
3449 * The memory allocated is set to zero.
3451 * For tight control over page level allocator and protection flags
3452 * use __vmalloc() instead.
3454 * Return: pointer to the allocated memory or %NULL on error
3456 void *vzalloc(unsigned long size)
3458 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3459 __builtin_return_address(0));
3461 EXPORT_SYMBOL(vzalloc);
3464 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3465 * @size: allocation size
3467 * The resulting memory area is zeroed so it can be mapped to userspace
3468 * without leaking data.
3470 * Return: pointer to the allocated memory or %NULL on error
3472 void *vmalloc_user(unsigned long size)
3474 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3475 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3476 VM_USERMAP, NUMA_NO_NODE,
3477 __builtin_return_address(0));
3479 EXPORT_SYMBOL(vmalloc_user);
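/*
 * Illustrative sketch (not part of this file): a buffer that will later be
 * mapped into userspace (see remap_vmalloc_range() below) should be allocated
 * with vmalloc_user() so it is zeroed and tagged VM_USERMAP. my_size is
 * hypothetical.
 *
 *	void *shared = vmalloc_user(my_size);
 *
 *	if (!shared)
 *		return -ENOMEM;
 */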
3482 * vmalloc_node - allocate memory on a specific node
3483 * @size: allocation size
3486 * Allocate enough pages to cover @size from the page level
3487 * allocator and map them into contiguous kernel virtual space.
3489 * For tight control over page level allocator and protection flags
3490 * use __vmalloc() instead.
3492 * Return: pointer to the allocated memory or %NULL on error
3494 void *vmalloc_node(unsigned long size, int node)
3496 return __vmalloc_node(size, 1, GFP_KERNEL, node,
3497 __builtin_return_address(0));
3499 EXPORT_SYMBOL(vmalloc_node);
3502 * vzalloc_node - allocate memory on a specific node with zero fill
3503 * @size: allocation size
3506 * Allocate enough pages to cover @size from the page level
3507 * allocator and map them into contiguous kernel virtual space.
3508 * The memory allocated is set to zero.
3510 * Return: pointer to the allocated memory or %NULL on error
3512 void *vzalloc_node(unsigned long size, int node)
3514 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3515 __builtin_return_address(0));
3517 EXPORT_SYMBOL(vzalloc_node);
3519 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3520 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3521 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3522 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3525 * 64b systems should always have either DMA or DMA32 zones. For others
3526 * GFP_DMA32 should do the right thing and use the normal zone.
3528 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3532 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3533 * @size: allocation size
3535 * Allocate enough 32bit PA addressable pages to cover @size from the
3536 * page level allocator and map them into contiguous kernel virtual space.
3538 * Return: pointer to the allocated memory or %NULL on error
3540 void *vmalloc_32(unsigned long size)
3542 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3543 __builtin_return_address(0));
3545 EXPORT_SYMBOL(vmalloc_32);
3548 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3549 * @size: allocation size
3551 * The resulting memory area is 32bit addressable and zeroed so it can be
3552 * mapped to userspace without leaking data.
3554 * Return: pointer to the allocated memory or %NULL on error
3556 void *vmalloc_32_user(unsigned long size)
3558 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3559 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3560 VM_USERMAP, NUMA_NO_NODE,
3561 __builtin_return_address(0));
3563 EXPORT_SYMBOL(vmalloc_32_user);
3566 * Atomically zero bytes in the iterator.
3568 * Returns the number of zeroed bytes.
3570 static size_t zero_iter(struct iov_iter *iter, size_t count)
3572 size_t remains = count;
3574 while (remains > 0) {
3577 num = min_t(size_t, remains, PAGE_SIZE);
3578 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
3585 return count - remains;
3589 * Small helper routine: copy contents from addr to the iterator.
3590 * If a page is not present, fill with zeroes.
3592 * Returns the number of copied bytes.
3594 static size_t aligned_vread_iter(struct iov_iter *iter,
3595 const char *addr, size_t count)
3597 size_t remains = count;
3600 while (remains > 0) {
3601 unsigned long offset, length;
3604 offset = offset_in_page(addr);
3605 length = PAGE_SIZE - offset;
3606 if (length > remains)
3608 page = vmalloc_to_page(addr);
3610 * To safely access this _mapped_ area we would need a lock. But
3611 * taking a lock here means adding overhead to the
3612 * vmalloc()/vfree() calls for this rarely used _debug_
3613 * interface. Instead of that, we use a local mapping via
3614 * copy_page_to_iter_nofault() and accept a small overhead in
3615 * this access function.
3618 copied = copy_page_to_iter_nofault(page, offset,
3621 copied = zero_iter(iter, length);
3626 if (copied != length)
3630 return count - remains;
3634 * Read from a vm_map_ram region of memory.
3636 * Returns the number of copied bytes.
3638 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
3639 size_t count, unsigned long flags)
3642 struct vmap_block *vb;
3644 unsigned long offset;
3645 unsigned int rs, re;
3649 * If it's an area created directly by the vm_map_ram() interface,
3650 * without further subdivision into vmap_block regions, read it out directly.
3653 if (!(flags & VMAP_BLOCK))
3654 return aligned_vread_iter(iter, addr, count);
3659 * The area is split into regions and tracked with vmap_block; read out
3660 * each region and zero-fill the holes between regions.
3662 xa = addr_to_vb_xa((unsigned long) addr);
3663 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
3667 spin_lock(&vb->lock);
3668 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
3669 spin_unlock(&vb->lock);
3673 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
3679 start = vmap_block_vaddr(vb->va->va_start, rs);
3682 size_t to_zero = min_t(size_t, start - addr, remains);
3683 size_t zeroed = zero_iter(iter, to_zero);
3688 if (remains == 0 || zeroed != to_zero)
3692 /* reading may start from the middle of a used region */
3693 offset = offset_in_page(addr);
3694 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
3698 copied = aligned_vread_iter(iter, start + offset, n);
3707 spin_unlock(&vb->lock);
3710 /* zero-fill the remaining dirty or free regions */
3711 return count - remains + zero_iter(iter, remains);
3713 /* We couldn't copy/zero everything */
3714 spin_unlock(&vb->lock);
3715 return count - remains;
3719 * vread_iter() - read vmalloc area in a safe way to an iterator.
3720 * @iter: the iterator to which data should be written.
3721 * @addr: vm address.
3722 * @count: number of bytes to be read.
3724 * This function checks that addr is a valid vmalloc'ed area, and
3725 * copies data from that area to the given iterator. If the given memory range
3726 * of [addr...addr+count) includes some valid address, data is copied to
3727 * the proper area of @iter. If there are memory holes, they'll be zero-filled.
3728 * An IOREMAP area is treated as a memory hole and no copy is done.
3730 * If [addr...addr+count) doesn't include any intersection with a live
3731 * vm_struct area, returns 0. @iter should be a kernel buffer.
3733 * Note: In normal operation, vread() is never necessary because the caller
3734 * should know the vmalloc() area is valid and can use memcpy().
3735 * This is for routines which have to access the vmalloc area without
3736 * any information, such as /proc/kcore.
3738 * Return: number of bytes for which addr and buf should be increased
3739 * (same number as @count) or %0 if [addr...addr+count) doesn't
3740 * include any intersection with valid vmalloc area
3742 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
3744 struct vmap_area *va;
3745 struct vm_struct *vm;
3747 size_t n, size, flags, remains;
3749 addr = kasan_reset_tag(addr);
3751 /* Don't allow overflow */
3752 if ((unsigned long) addr + count < count)
3753 count = -(unsigned long) addr;
3757 spin_lock(&vmap_area_lock);
3758 va = find_vmap_area_exceed_addr((unsigned long)addr);
3762 /* no intersection with a live vmap_area */
3763 if ((unsigned long)addr + remains <= va->va_start)
3766 list_for_each_entry_from(va, &vmap_area_list, list) {
3773 flags = va->flags & VMAP_FLAGS_MASK;
3775 * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it needs
3776 * to be set together with VMAP_RAM.
3778 WARN_ON(flags == VMAP_BLOCK);
3783 if (vm && (vm->flags & VM_UNINITIALIZED))
3786 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3789 vaddr = (char *) va->va_start;
3790 size = vm ? get_vm_area_size(vm) : va_size(va);
3792 if (addr >= vaddr + size)
3796 size_t to_zero = min_t(size_t, vaddr - addr, remains);
3797 size_t zeroed = zero_iter(iter, to_zero);
3802 if (remains == 0 || zeroed != to_zero)
3806 n = vaddr + size - addr;
3810 if (flags & VMAP_RAM)
3811 copied = vmap_ram_vread_iter(iter, addr, n, flags);
3812 else if (!(vm->flags & VM_IOREMAP))
3813 copied = aligned_vread_iter(iter, addr, n);
3814 else /* IOREMAP area is treated as memory hole */
3815 copied = zero_iter(iter, n);
3825 spin_unlock(&vmap_area_lock);
3826 /* zero-fill memory holes */
3827 return count - remains + zero_iter(iter, remains);
3829 /* Nothing remains, or we couldn't copy/zero everything. */
3830 spin_unlock(&vmap_area_lock);
3832 return count - remains;
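/*
 * Illustrative sketch (not part of this file): a /proc/kcore-style reader
 * copying a vmalloc range into a kernel buffer through an iov_iter.
 * my_buf/my_len/my_vaddr are hypothetical.
 *
 *	struct kvec kvec = { .iov_base = my_buf, .iov_len = my_len };
 *	struct iov_iter iter;
 *	long copied;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, my_len);
 *	copied = vread_iter(&iter, my_vaddr, my_len);
 */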
3836 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3837 * @vma: vma to cover
3838 * @uaddr: target user address to start at
3839 * @kaddr: virtual address of vmalloc kernel memory
3840 * @pgoff: offset from @kaddr to start at
3841 * @size: size of map area
3843 * Returns: 0 for success, -Exxx on failure
3845 * This function checks that @kaddr is a valid vmalloc'ed area,
3846 * and that it is big enough to cover the range starting at
3847 * @uaddr in @vma. Will return failure if that criterion isn't
3850 * Similar to remap_pfn_range() (see mm/memory.c)
3852 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3853 void *kaddr, unsigned long pgoff,
3856 struct vm_struct *area;
3858 unsigned long end_index;
3860 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3863 size = PAGE_ALIGN(size);
3865 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3868 area = find_vm_area(kaddr);
3872 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3875 if (check_add_overflow(size, off, &end_index) ||
3876 end_index > get_vm_area_size(area))
3881 struct page *page = vmalloc_to_page(kaddr);
3884 ret = vm_insert_page(vma, uaddr, page);
3893 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
3899 * remap_vmalloc_range - map vmalloc pages to userspace
3900 * @vma: vma to cover (map full range of vma)
3901 * @addr: vmalloc memory
3902 * @pgoff: number of pages into addr before first page to map
3904 * Returns: 0 for success, -Exxx on failure
3906 * This function checks that addr is a valid vmalloc'ed area, and
3907 * that it is big enough to cover the vma. Will return failure if
3908 * that criterion isn't met.
3910 * Similar to remap_pfn_range() (see mm/memory.c)
3912 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3913 unsigned long pgoff)
3915 return remap_vmalloc_range_partial(vma, vma->vm_start,
3917 vma->vm_end - vma->vm_start);
3919 EXPORT_SYMBOL(remap_vmalloc_range);
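/*
 * Illustrative sketch (not part of this file): a driver .mmap handler exposing
 * a vmalloc_user() buffer to userspace. struct my_dev and its shared field
 * are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->shared, vma->vm_pgoff);
 *	}
 */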
3921 void free_vm_area(struct vm_struct *area)
3923 struct vm_struct *ret;
3924 ret = remove_vm_area(area->addr);
3925 BUG_ON(ret != area);
3928 EXPORT_SYMBOL_GPL(free_vm_area);
3931 static struct vmap_area *node_to_va(struct rb_node *n)
3933 return rb_entry_safe(n, struct vmap_area, rb_node);
3937 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3938 * @addr: target address
3940 * Returns: vmap_area if it is found. If there is no such area
3941 * the first highest (reverse order) vmap_area is returned,
3942 * i.e. va->va_start < addr && va->va_end < addr, or NULL
3943 * if there are no areas before @addr.
3945 static struct vmap_area *
3946 pvm_find_va_enclose_addr(unsigned long addr)
3948 struct vmap_area *va, *tmp;
3951 n = free_vmap_area_root.rb_node;
3955 tmp = rb_entry(n, struct vmap_area, rb_node);
3956 if (tmp->va_start <= addr) {
3958 if (tmp->va_end >= addr)
3971 * pvm_determine_end_from_reverse - find the highest aligned address
3972 * of free block below VMALLOC_END
3974 * in - the VA we start the search from (reverse order);
3975 * out - the VA with the highest aligned end address.
3976 * @align: alignment for required highest address
3978 * Returns: determined end address within vmap_area
3980 static unsigned long
3981 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3983 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3987 list_for_each_entry_from_reverse((*va),
3988 &free_vmap_area_list, list) {
3989 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3990 if ((*va)->va_start < addr)
3999 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4000 * @offsets: array containing offset of each area
4001 * @sizes: array containing size of each area
4002 * @nr_vms: the number of areas to allocate
4003 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4005 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4006 * vm_structs on success, %NULL on failure
4008 * Percpu allocator wants to use congruent vm areas so that it can
4009 * maintain the offsets among percpu areas. This function allocates
4010 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
4011 * be scattered pretty far, with the distance between two areas easily going
4012 * up to gigabytes. To avoid interacting with regular vmallocs, these
4013 * areas are allocated from the top.
4015 * Despite its complicated look, this allocator is rather simple. It
4016 * does everything top-down and scans free blocks from the end looking
4017 * for matching base. While scanning, if any of the areas do not fit the
4018 * base address is pulled down to fit the area. Scanning is repeated till
4019 * all the areas fit and then all necessary data structures are inserted
4020 * and the result is returned.
4022 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4023 const size_t *sizes, int nr_vms,
4026 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4027 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4028 struct vmap_area **vas, *va;
4029 struct vm_struct **vms;
4030 int area, area2, last_area, term_area;
4031 unsigned long base, start, size, end, last_end, orig_start, orig_end;
4032 bool purged = false;
4034 /* verify parameters and allocate data structures */
4035 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4036 for (last_area = 0, area = 0; area < nr_vms; area++) {
4037 start = offsets[area];
4038 end = start + sizes[area];
4040 /* is everything aligned properly? */
4041 BUG_ON(!IS_ALIGNED(offsets[area], align));
4042 BUG_ON(!IS_ALIGNED(sizes[area], align));
4044 /* detect the area with the highest address */
4045 if (start > offsets[last_area])
4048 for (area2 = area + 1; area2 < nr_vms; area2++) {
4049 unsigned long start2 = offsets[area2];
4050 unsigned long end2 = start2 + sizes[area2];
4052 BUG_ON(start2 < end && start < end2);
4055 last_end = offsets[last_area] + sizes[last_area];
4057 if (vmalloc_end - vmalloc_start < last_end) {
4062 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4063 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4067 for (area = 0; area < nr_vms; area++) {
4068 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4069 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4070 if (!vas[area] || !vms[area])
4074 spin_lock(&free_vmap_area_lock);
4076 /* start scanning - we scan from the top, begin with the last area */
4077 area = term_area = last_area;
4078 start = offsets[area];
4079 end = start + sizes[area];
4081 va = pvm_find_va_enclose_addr(vmalloc_end);
4082 base = pvm_determine_end_from_reverse(&va, align) - end;
4086 * base might have underflowed, add last_end before
4089 if (base + last_end < vmalloc_start + last_end)
4093 * Fitting base has not been found.
4099 * If required width exceeds current VA block, move
4100 * base downwards and then recheck.
4102 if (base + end > va->va_end) {
4103 base = pvm_determine_end_from_reverse(&va, align) - end;
4109 * If this VA does not fit, move base downwards and recheck.
4111 if (base + start < va->va_start) {
4112 va = node_to_va(rb_prev(&va->rb_node));
4113 base = pvm_determine_end_from_reverse(&va, align) - end;
4119 * This area fits, move on to the previous one. If
4120 * the previous one is the terminal one, we're done.
4122 area = (area + nr_vms - 1) % nr_vms;
4123 if (area == term_area)
4126 start = offsets[area];
4127 end = start + sizes[area];
4128 va = pvm_find_va_enclose_addr(base + end);
4131 /* we've found a fitting base, insert all va's */
4132 for (area = 0; area < nr_vms; area++) {
4135 start = base + offsets[area];
4138 va = pvm_find_va_enclose_addr(start);
4139 if (WARN_ON_ONCE(va == NULL))
4140 /* It is a BUG(), but trigger recovery instead. */
4143 ret = adjust_va_to_fit_type(&free_vmap_area_root,
4144 &free_vmap_area_list,
4146 if (WARN_ON_ONCE(unlikely(ret)))
4147 /* It is a BUG(), but trigger recovery instead. */
4150 /* Allocated area. */
4152 va->va_start = start;
4153 va->va_end = start + size;
4156 spin_unlock(&free_vmap_area_lock);
4158 /* populate the kasan shadow space */
4159 for (area = 0; area < nr_vms; area++) {
4160 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4161 goto err_free_shadow;
4164 /* insert all vm's */
4165 spin_lock(&vmap_area_lock);
4166 for (area = 0; area < nr_vms; area++) {
4167 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
4169 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
4172 spin_unlock(&vmap_area_lock);
4175 * Mark allocated areas as accessible. Do it now as a best-effort
4176 * approach, as they can be mapped outside of vmalloc code.
4177 * With hardware tag-based KASAN, marking is skipped for
4178 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4180 for (area = 0; area < nr_vms; area++)
4181 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4182 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4189 * Remove previously allocated areas. There is no
4190 * need to remove these areas from the busy tree,
4191 * because they are inserted only on the final step
4192 * and only when pcpu_get_vm_areas() succeeds.
4195 orig_start = vas[area]->va_start;
4196 orig_end = vas[area]->va_end;
4197 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4198 &free_vmap_area_list);
4200 kasan_release_vmalloc(orig_start, orig_end,
4201 va->va_start, va->va_end);
4206 spin_unlock(&free_vmap_area_lock);
4208 reclaim_and_purge_vmap_areas();
4211 /* Before "retry", check whether we have recovered. */
4212 for (area = 0; area < nr_vms; area++) {
4216 vas[area] = kmem_cache_zalloc(
4217 vmap_area_cachep, GFP_KERNEL);
4226 for (area = 0; area < nr_vms; area++) {
4228 kmem_cache_free(vmap_area_cachep, vas[area]);
4238 spin_lock(&free_vmap_area_lock);
4240 * We release all the vmalloc shadows, even the ones for regions that
4241 * hadn't been successfully added. This relies on kasan_release_vmalloc
4242 * being able to tolerate this case.
4244 for (area = 0; area < nr_vms; area++) {
4245 orig_start = vas[area]->va_start;
4246 orig_end = vas[area]->va_end;
4247 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4248 &free_vmap_area_list);
4250 kasan_release_vmalloc(orig_start, orig_end,
4251 va->va_start, va->va_end);
4255 spin_unlock(&free_vmap_area_lock);
4262 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4263 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4264 * @nr_vms: the number of allocated areas
4266 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4268 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4272 for (i = 0; i < nr_vms; i++)
4273 free_vm_area(vms[i]);
4276 #endif /* CONFIG_SMP */
4278 #ifdef CONFIG_PRINTK
4279 bool vmalloc_dump_obj(void *object)
4281 struct vm_struct *vm;
4282 void *objp = (void *)PAGE_ALIGN((unsigned long)object);
4284 vm = find_vm_area(objp);
4287 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4288 vm->nr_pages, (unsigned long)vm->addr, vm->caller);
4293 #ifdef CONFIG_PROC_FS
4294 static void *s_start(struct seq_file *m, loff_t *pos)
4295 __acquires(&vmap_purge_lock)
4296 __acquires(&vmap_area_lock)
4298 mutex_lock(&vmap_purge_lock);
4299 spin_lock(&vmap_area_lock);
4301 return seq_list_start(&vmap_area_list, *pos);
4304 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4306 return seq_list_next(p, &vmap_area_list, pos);
4309 static void s_stop(struct seq_file *m, void *p)
4310 __releases(&vmap_area_lock)
4311 __releases(&vmap_purge_lock)
4313 spin_unlock(&vmap_area_lock);
4314 mutex_unlock(&vmap_purge_lock);
4317 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4319 if (IS_ENABLED(CONFIG_NUMA)) {
4320 unsigned int nr, *counters = m->private;
4321 unsigned int step = 1U << vm_area_page_order(v);
4326 if (v->flags & VM_UNINITIALIZED)
4328 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4331 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4333 for (nr = 0; nr < v->nr_pages; nr += step)
4334 counters[page_to_nid(v->pages[nr])] += step;
4335 for_each_node_state(nr, N_HIGH_MEMORY)
4337 seq_printf(m, " N%u=%u", nr, counters[nr]);
4341 static void show_purge_info(struct seq_file *m)
4343 struct vmap_area *va;
4345 spin_lock(&purge_vmap_area_lock);
4346 list_for_each_entry(va, &purge_vmap_area_list, list) {
4347 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4348 (void *)va->va_start, (void *)va->va_end,
4349 va->va_end - va->va_start);
4351 spin_unlock(&purge_vmap_area_lock);
4354 static int s_show(struct seq_file *m, void *p)
4356 struct vmap_area *va;
4357 struct vm_struct *v;
4359 va = list_entry(p, struct vmap_area, list);
4362 if (va->flags & VMAP_RAM)
4363 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4364 (void *)va->va_start, (void *)va->va_end,
4365 va->va_end - va->va_start);
4372 seq_printf(m, "0x%pK-0x%pK %7ld",
4373 v->addr, v->addr + v->size, v->size);
4376 seq_printf(m, " %pS", v->caller);
4379 seq_printf(m, " pages=%d", v->nr_pages);
4382 seq_printf(m, " phys=%pa", &v->phys_addr);
4384 if (v->flags & VM_IOREMAP)
4385 seq_puts(m, " ioremap");
4387 if (v->flags & VM_ALLOC)
4388 seq_puts(m, " vmalloc");
4390 if (v->flags & VM_MAP)
4391 seq_puts(m, " vmap");
4393 if (v->flags & VM_USERMAP)
4394 seq_puts(m, " user");
4396 if (v->flags & VM_DMA_COHERENT)
4397 seq_puts(m, " dma-coherent");
4399 if (is_vmalloc_addr(v->pages))
4400 seq_puts(m, " vpages");
4402 show_numa_info(m, v);
4406 * As a final step, dump "unpurged" areas.
4409 if (list_is_last(&va->list, &vmap_area_list))
4415 static const struct seq_operations vmalloc_op = {
4422 static int __init proc_vmalloc_init(void)
4424 if (IS_ENABLED(CONFIG_NUMA))
4425 proc_create_seq_private("vmallocinfo", 0400, NULL,
4427 nr_node_ids * sizeof(unsigned int), NULL);
4429 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
4432 module_init(proc_vmalloc_init);
4436 void __init vmalloc_init(void)
4438 struct vmap_area *va;
4439 struct vm_struct *tmp;
4443 * Create the cache for vmap_area objects.
4445 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
4447 for_each_possible_cpu(i) {
4448 struct vmap_block_queue *vbq;
4449 struct vfree_deferred *p;
4451 vbq = &per_cpu(vmap_block_queue, i);
4452 spin_lock_init(&vbq->lock);
4453 INIT_LIST_HEAD(&vbq->free);
4454 p = &per_cpu(vfree_deferred, i);
4455 init_llist_head(&p->list);
4456 INIT_WORK(&p->wq, delayed_vfree_work);
4457 xa_init(&vbq->vmap_blocks);
4460 /* Import existing vmlist entries. */
4461 for (tmp = vmlist; tmp; tmp = tmp->next) {
4462 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4463 if (WARN_ON_ONCE(!va))
4466 va->va_start = (unsigned long)tmp->addr;
4467 va->va_end = va->va_start + tmp->size;
4469 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
4473 * Now we can initialize the free vmap space.
4475 vmap_init_free_space();
4476 vmap_initialized = true;