1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1993 Linus Torvalds
6 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
8 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
9 * Numa awareness, Christoph Lameter, SGI, June 2005
12 #include <linux/vmalloc.h>
14 #include <linux/module.h>
15 #include <linux/highmem.h>
16 #include <linux/sched/signal.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/set_memory.h>
23 #include <linux/debugobjects.h>
24 #include <linux/kallsyms.h>
25 #include <linux/list.h>
26 #include <linux/notifier.h>
27 #include <linux/rbtree.h>
28 #include <linux/radix-tree.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/llist.h>
35 #include <linux/bitops.h>
36 #include <linux/rbtree_augmented.h>
37 #include <linux/overflow.h>
39 #include <linux/uaccess.h>
40 #include <asm/tlbflush.h>
41 #include <asm/shmparam.h>
45 bool is_vmalloc_addr(const void *x)
47 unsigned long addr = (unsigned long)x;
49 return addr >= VMALLOC_START && addr < VMALLOC_END;
51 EXPORT_SYMBOL(is_vmalloc_addr);
53 struct vfree_deferred {
54 struct llist_head list;
55 struct work_struct wq;
57 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
59 static void __vunmap(const void *, int);
61 static void free_work(struct work_struct *w)
63 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
64 struct llist_node *t, *llnode;
66 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
67 __vunmap((void *)llnode, 1);
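/*
 * Note: the entries queued on vfree_deferred.list are the to-be-freed
 * areas themselves. The deferred-free path stores a struct llist_node
 * at the start of each area (see __vfree_deferred() below), which is
 * why the llnode pointer can be handed straight to __vunmap() here.
 */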
70 /*** Page table manipulation functions ***/
72 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
76 pte = pte_offset_kernel(pmd, addr);
78 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80 } while (pte++, addr += PAGE_SIZE, addr != end);
83 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
88 pmd = pmd_offset(pud, addr);
90 next = pmd_addr_end(addr, end);
91 if (pmd_clear_huge(pmd))
93 if (pmd_none_or_clear_bad(pmd))
95 vunmap_pte_range(pmd, addr, next);
96 } while (pmd++, addr = next, addr != end);
99 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
104 pud = pud_offset(p4d, addr);
106 next = pud_addr_end(addr, end);
107 if (pud_clear_huge(pud))
109 if (pud_none_or_clear_bad(pud))
111 vunmap_pmd_range(pud, addr, next);
112 } while (pud++, addr = next, addr != end);
115 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
120 p4d = p4d_offset(pgd, addr);
122 next = p4d_addr_end(addr, end);
123 if (p4d_clear_huge(p4d))
125 if (p4d_none_or_clear_bad(p4d))
127 vunmap_pud_range(p4d, addr, next);
128 } while (p4d++, addr = next, addr != end);
131 static void vunmap_page_range(unsigned long addr, unsigned long end)
137 pgd = pgd_offset_k(addr);
139 next = pgd_addr_end(addr, end);
140 if (pgd_none_or_clear_bad(pgd))
142 vunmap_p4d_range(pgd, addr, next);
143 } while (pgd++, addr = next, addr != end);
146 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
147 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
152 * nr is a running index into the array which helps higher level
153 * callers keep track of where we're up to.
156 pte = pte_alloc_kernel(pmd, addr);
160 struct page *page = pages[*nr];
162 if (WARN_ON(!pte_none(*pte)))
166 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
168 } while (pte++, addr += PAGE_SIZE, addr != end);
172 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
173 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
178 pmd = pmd_alloc(&init_mm, pud, addr);
182 next = pmd_addr_end(addr, end);
183 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
185 } while (pmd++, addr = next, addr != end);
189 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
190 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
195 pud = pud_alloc(&init_mm, p4d, addr);
199 next = pud_addr_end(addr, end);
200 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
202 } while (pud++, addr = next, addr != end);
206 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
207 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
212 p4d = p4d_alloc(&init_mm, pgd, addr);
216 next = p4d_addr_end(addr, end);
217 if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
219 } while (p4d++, addr = next, addr != end);
224 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
225 * will have pfns corresponding to the "pages" array.
227 * I.e. the pte at addr + N*PAGE_SIZE shall point to the pfn corresponding to pages[N].
229 static int vmap_page_range_noflush(unsigned long start, unsigned long end,
230 pgprot_t prot, struct page **pages)
234 unsigned long addr = start;
239 pgd = pgd_offset_k(addr);
241 next = pgd_addr_end(addr, end);
242 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
245 } while (pgd++, addr = next, addr != end);
250 static int vmap_page_range(unsigned long start, unsigned long end,
251 pgprot_t prot, struct page **pages)
255 ret = vmap_page_range_noflush(start, end, prot, pages);
256 flush_cache_vmap(start, end);
260 int is_vmalloc_or_module_addr(const void *x)
263 * ARM, x86-64 and sparc64 put modules in a special place,
264 * and fall back on vmalloc() if that fails. Others
265 * just put it in the vmalloc space.
267 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
268 unsigned long addr = (unsigned long)x;
269 if (addr >= MODULES_VADDR && addr < MODULES_END)
272 return is_vmalloc_addr(x);
276 * Walk a vmap address to the struct page it maps.
278 struct page *vmalloc_to_page(const void *vmalloc_addr)
280 unsigned long addr = (unsigned long) vmalloc_addr;
281 struct page *page = NULL;
282 pgd_t *pgd = pgd_offset_k(addr);
289 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
290 * architectures that do not vmalloc module space
292 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
296 p4d = p4d_offset(pgd, addr);
299 pud = pud_offset(p4d, addr);
302 * Don't dereference bad PUD or PMD (below) entries. This will also
303 * identify huge mappings, which we may encounter on architectures
304 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
305 * identified as vmalloc addresses by is_vmalloc_addr(), but are
306 * not [unambiguously] associated with a struct page, so there is
307 * no correct value to return for them.
309 WARN_ON_ONCE(pud_bad(*pud));
310 if (pud_none(*pud) || pud_bad(*pud))
312 pmd = pmd_offset(pud, addr);
313 WARN_ON_ONCE(pmd_bad(*pmd));
314 if (pmd_none(*pmd) || pmd_bad(*pmd))
317 ptep = pte_offset_map(pmd, addr);
319 if (pte_present(pte))
320 page = pte_page(pte);
324 EXPORT_SYMBOL(vmalloc_to_page);
327 * Map a vmalloc()-space virtual address to the physical page frame number.
329 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
331 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
333 EXPORT_SYMBOL(vmalloc_to_pfn);
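/*
 * Illustrative sketch (hypothetical usage, not an in-tree caller):
 * translating a vmalloc()'d address into its backing page, e.g. when
 * building a scatterlist. Error handling is elided.
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *	struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);
 *	unsigned long pfn = vmalloc_to_pfn(buf + PAGE_SIZE);
 *
 * Both helpers walk the kernel page tables as shown above; "pg" is the
 * second page backing "buf" and "pfn" is its page frame number.
 */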
336 /*** Global kva allocator ***/
338 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
339 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
342 static DEFINE_SPINLOCK(vmap_area_lock);
343 static DEFINE_SPINLOCK(free_vmap_area_lock);
344 /* Export for kexec only */
345 LIST_HEAD(vmap_area_list);
346 static LLIST_HEAD(vmap_purge_list);
347 static struct rb_root vmap_area_root = RB_ROOT;
348 static bool vmap_initialized __read_mostly;
351 * This kmem_cache is used for vmap_area objects. Instead of
352 * allocating from slab we reuse an object from this cache to
353 * make things faster. Especially in "no edge" splitting of a free block.
356 static struct kmem_cache *vmap_area_cachep;
359 * This linked list is used in pair with free_vmap_area_root.
360 * It gives O(1) access to prev/next to perform fast coalescing.
362 static LIST_HEAD(free_vmap_area_list);
365 * This augmented red-black tree represents the free vmap space.
366 * All vmap_area objects in this tree are sorted by va->va_start
367 * address. It is used for allocation and merging when a vmap
368 * object is released.
370 * Each vmap_area node contains the maximum available free block
371 * size of its sub-tree, right or left. Therefore it is possible to
372 * find the lowest (leftmost) free area that satisfies a request.
374 static struct rb_root free_vmap_area_root = RB_ROOT;
377 * Preload a CPU with one object for "no edge" split case. The
378 * aim is to get rid of allocations from the atomic context, thus
379 * to use more permissive allocation masks.
381 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
383 static __always_inline unsigned long
384 va_size(struct vmap_area *va)
386 return (va->va_end - va->va_start);
389 static __always_inline unsigned long
390 get_subtree_max_size(struct rb_node *node)
392 struct vmap_area *va;
394 va = rb_entry_safe(node, struct vmap_area, rb_node);
395 return va ? va->subtree_max_size : 0;
399 * Gets called when the node is removed and the tree is rotated.
401 static __always_inline unsigned long
402 compute_subtree_max_size(struct vmap_area *va)
404 return max3(va_size(va),
405 get_subtree_max_size(va->rb_node.rb_left),
406 get_subtree_max_size(va->rb_node.rb_right));
409 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
410 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
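/*
 * Note on the augmented property: with the callbacks declared above,
 * every node in the free tree caches the largest va_size() found in
 * its own subtree. For example, if the tree holds free areas of sizes
 * 2, 4 and 8 pages, the root's subtree_max_size is 8, so a request
 * for 16 pages can be rejected at the root, while a request for 3
 * pages only descends into subtrees whose cached maximum is >= 3.
 * find_vmap_lowest_match() below relies on exactly this.
 */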
412 static void purge_vmap_area_lazy(void);
413 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
414 static unsigned long lazy_max_pages(void);
416 static atomic_long_t nr_vmalloc_pages;
418 unsigned long vmalloc_nr_pages(void)
420 return atomic_long_read(&nr_vmalloc_pages);
423 static struct vmap_area *__find_vmap_area(unsigned long addr)
425 struct rb_node *n = vmap_area_root.rb_node;
428 struct vmap_area *va;
430 va = rb_entry(n, struct vmap_area, rb_node);
431 if (addr < va->va_start)
433 else if (addr >= va->va_end)
443 * This function returns the address of the parent node
444 * and of its left or right link for further processing.
446 static __always_inline struct rb_node **
447 find_va_links(struct vmap_area *va,
448 struct rb_root *root, struct rb_node *from,
449 struct rb_node **parent)
451 struct vmap_area *tmp_va;
452 struct rb_node **link;
455 link = &root->rb_node;
456 if (unlikely(!*link)) {
465 * Go to the bottom of the tree. When we hit the last point,
466 * we end up with the parent rb_node and the correct direction
467 * ("link") to which the new va->rb_node will be attached.
470 tmp_va = rb_entry(*link, struct vmap_area, rb_node);
473 * During the traversal we also do some sanity checks.
474 * Trigger the BUG() if there are side (left/right) or full overlaps.
477 if (va->va_start < tmp_va->va_end &&
478 va->va_end <= tmp_va->va_start)
479 link = &(*link)->rb_left;
480 else if (va->va_end > tmp_va->va_start &&
481 va->va_start >= tmp_va->va_end)
482 link = &(*link)->rb_right;
487 *parent = &tmp_va->rb_node;
491 static __always_inline struct list_head *
492 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
494 struct list_head *list;
496 if (unlikely(!parent))
498 * The red-black tree where we try to find VA neighbors
499 * before merging or inserting is empty, i.e. it means
500 * there is no free vmap space. Normally it does not
501 * happen but we handle this case anyway.
505 list = &rb_entry(parent, struct vmap_area, rb_node)->list;
506 return (&parent->rb_right == link ? list->next : list);
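/*
 * Note: if the new node would hang off the parent's right link, the
 * parent precedes it in address order, so the parent's list successor
 * is the candidate next sibling; otherwise the parent itself is the
 * next entry in the address-sorted list.
 */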
509 static __always_inline void
510 link_va(struct vmap_area *va, struct rb_root *root,
511 struct rb_node *parent, struct rb_node **link, struct list_head *head)
514 * VA is still not in the list, but we can
515 * identify its future previous list_head node.
517 if (likely(parent)) {
518 head = &rb_entry(parent, struct vmap_area, rb_node)->list;
519 if (&parent->rb_right != link)
523 /* Insert to the rb-tree */
524 rb_link_node(&va->rb_node, parent, link);
525 if (root == &free_vmap_area_root) {
527 * Some explanation here. Just perform simple insertion
528 * to the tree. We do not set va->subtree_max_size to
529 * its current size before calling rb_insert_augmented().
530 * This is because we populate the tree from the bottom
531 * up to parent levels once the node _is_ in the tree.
533 * Therefore we set subtree_max_size to zero after insertion,
534 * to let __augment_tree_propagate_from() put everything in
535 * the correct order later on.
537 rb_insert_augmented(&va->rb_node,
538 root, &free_vmap_area_rb_augment_cb);
539 va->subtree_max_size = 0;
541 rb_insert_color(&va->rb_node, root);
544 /* Address-sort this list */
545 list_add(&va->list, head);
548 static __always_inline void
549 unlink_va(struct vmap_area *va, struct rb_root *root)
551 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
554 if (root == &free_vmap_area_root)
555 rb_erase_augmented(&va->rb_node,
556 root, &free_vmap_area_rb_augment_cb);
558 rb_erase(&va->rb_node, root);
561 RB_CLEAR_NODE(&va->rb_node);
564 #if DEBUG_AUGMENT_PROPAGATE_CHECK
566 augment_tree_propagate_check(struct rb_node *n)
568 struct vmap_area *va;
569 struct rb_node *node;
576 va = rb_entry(n, struct vmap_area, rb_node);
577 size = va->subtree_max_size;
581 va = rb_entry(node, struct vmap_area, rb_node);
583 if (get_subtree_max_size(node->rb_left) == size) {
584 node = node->rb_left;
586 if (va_size(va) == size) {
591 node = node->rb_right;
596 va = rb_entry(n, struct vmap_area, rb_node);
597 pr_emerg("tree is corrupted: %lu, %lu\n",
598 va_size(va), va->subtree_max_size);
601 augment_tree_propagate_check(n->rb_left);
602 augment_tree_propagate_check(n->rb_right);
607 * This function populates subtree_max_size from the bottom to upper
608 * levels, starting from the VA point. The propagation must be done
609 * when a VA size is modified by changing its va_start/va_end, or
610 * when a VA is newly inserted into the tree.
612 * It means that __augment_tree_propagate_from() must be called:
613 * - After a VA has been inserted into the tree (free path);
614 * - After a VA has been shrunk (allocation path);
615 * - After a VA has been increased (merging path).
617 * Please note that this does not mean that upper parent nodes
618 * and their subtree_max_size are recalculated all the way up to the root node.
627 * For example (with a root of size 4 whose children have sizes 2 and 8):
628 * if we shrink the node 4 to 2, no modification is required. If we shrink
629 * the node 2 to 1, only its subtree_max_size is updated and set to 1. If we
630 * shrink the node 8 to 6, then its subtree_max_size is set to 6 and the parent's maximum is recalculated to 6 as well.
633 static __always_inline void
634 augment_tree_propagate_from(struct vmap_area *va)
636 struct rb_node *node = &va->rb_node;
637 unsigned long new_va_sub_max_size;
640 va = rb_entry(node, struct vmap_area, rb_node);
641 new_va_sub_max_size = compute_subtree_max_size(va);
644 * If the newly calculated maximum available size of the
645 * subtree is equal to the current one, then it means that
646 * the tree is propagated correctly. So we have to stop at
647 * this point to save cycles.
649 if (va->subtree_max_size == new_va_sub_max_size)
652 va->subtree_max_size = new_va_sub_max_size;
653 node = rb_parent(&va->rb_node);
656 #if DEBUG_AUGMENT_PROPAGATE_CHECK
657 augment_tree_propagate_check(free_vmap_area_root.rb_node);
662 insert_vmap_area(struct vmap_area *va,
663 struct rb_root *root, struct list_head *head)
665 struct rb_node **link;
666 struct rb_node *parent;
668 link = find_va_links(va, root, NULL, &parent);
669 link_va(va, root, parent, link, head);
673 insert_vmap_area_augment(struct vmap_area *va,
674 struct rb_node *from, struct rb_root *root,
675 struct list_head *head)
677 struct rb_node **link;
678 struct rb_node *parent;
681 link = find_va_links(va, NULL, from, &parent);
683 link = find_va_links(va, root, NULL, &parent);
685 link_va(va, root, parent, link, head);
686 augment_tree_propagate_from(va);
690 * Merge a de-allocated chunk of VA memory with previous
691 * and next free blocks. If no coalescing is done, a new
692 * free area is inserted. If the VA has been merged, it is freed and the resulting (merged) area is returned.
695 static __always_inline struct vmap_area *
696 merge_or_add_vmap_area(struct vmap_area *va,
697 struct rb_root *root, struct list_head *head)
699 struct vmap_area *sibling;
700 struct list_head *next;
701 struct rb_node **link;
702 struct rb_node *parent;
706 * Find a place in the tree where VA potentially will be
707 * inserted, unless it is merged with its sibling/siblings.
709 link = find_va_links(va, root, NULL, &parent);
712 * Get next node of VA to check if merging can be done.
714 next = get_va_next_sibling(parent, link);
715 if (unlikely(next == NULL))
721 * |<------VA------>|<-----Next----->|
726 sibling = list_entry(next, struct vmap_area, list);
727 if (sibling->va_start == va->va_end) {
728 sibling->va_start = va->va_start;
730 /* Check and update the tree if needed. */
731 augment_tree_propagate_from(sibling);
733 /* Free vmap_area object. */
734 kmem_cache_free(vmap_area_cachep, va);
736 /* Point to the new merged area. */
745 * |<-----Prev----->|<------VA------>|
749 if (next->prev != head) {
750 sibling = list_entry(next->prev, struct vmap_area, list);
751 if (sibling->va_end == va->va_start) {
752 sibling->va_end = va->va_end;
754 /* Check and update the tree if needed. */
755 augment_tree_propagate_from(sibling);
760 /* Free vmap_area object. */
761 kmem_cache_free(vmap_area_cachep, va);
763 /* Point to the new merged area. */
771 link_va(va, root, parent, link, head);
772 augment_tree_propagate_from(va);
778 static __always_inline bool
779 is_within_this_va(struct vmap_area *va, unsigned long size,
780 unsigned long align, unsigned long vstart)
782 unsigned long nva_start_addr;
784 if (va->va_start > vstart)
785 nva_start_addr = ALIGN(va->va_start, align);
787 nva_start_addr = ALIGN(vstart, align);
789 /* Can overflow due to a big size or alignment. */
790 if (nva_start_addr + size < nva_start_addr ||
791 nva_start_addr < vstart)
794 return (nva_start_addr + size <= va->va_end);
798 * Find the first free block (lowest start address) in the tree
799 * that will satisfy the request corresponding to the passed parameters.
802 static __always_inline struct vmap_area *
803 find_vmap_lowest_match(unsigned long size,
804 unsigned long align, unsigned long vstart)
806 struct vmap_area *va;
807 struct rb_node *node;
808 unsigned long length;
810 /* Start from the root. */
811 node = free_vmap_area_root.rb_node;
813 /* Adjust the search size for alignment overhead. */
814 length = size + align - 1;
817 va = rb_entry(node, struct vmap_area, rb_node);
819 if (get_subtree_max_size(node->rb_left) >= length &&
820 vstart < va->va_start) {
821 node = node->rb_left;
823 if (is_within_this_va(va, size, align, vstart))
827 * Does not make sense to go deeper towards the right
828 * sub-tree if it does not have a free block that is
829 * equal to or bigger than the requested search length.
831 if (get_subtree_max_size(node->rb_right) >= length) {
832 node = node->rb_right;
837 * OK. We roll back and find the first right sub-tree
838 * that will satisfy the search criteria. It can happen
839 * only once due to the "vstart" restriction.
841 while ((node = rb_parent(node))) {
842 va = rb_entry(node, struct vmap_area, rb_node);
843 if (is_within_this_va(va, size, align, vstart))
846 if (get_subtree_max_size(node->rb_right) >= length &&
847 vstart <= va->va_start) {
848 node = node->rb_right;
858 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
859 #include <linux/random.h>
861 static struct vmap_area *
862 find_vmap_lowest_linear_match(unsigned long size,
863 unsigned long align, unsigned long vstart)
865 struct vmap_area *va;
867 list_for_each_entry(va, &free_vmap_area_list, list) {
868 if (!is_within_this_va(va, size, align, vstart))
878 find_vmap_lowest_match_check(unsigned long size)
880 struct vmap_area *va_1, *va_2;
881 unsigned long vstart;
884 get_random_bytes(&rnd, sizeof(rnd));
885 vstart = VMALLOC_START + rnd;
887 va_1 = find_vmap_lowest_match(size, 1, vstart);
888 va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
891 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
898 FL_FIT_TYPE = 1, /* full fit */
899 LE_FIT_TYPE = 2, /* left edge fit */
900 RE_FIT_TYPE = 3, /* right edge fit */
901 NE_FIT_TYPE = 4 /* no edge fit */
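/*
 * Illustrative sketch of the fit types handled below: given a free area
 * [va_start, va_end) and a request [nva_start_addr, nva_start_addr + size):
 *
 *	FL_FIT_TYPE:	|<--------- request --------->|	exact match
 *	LE_FIT_TYPE:	|<-- request -->|.............|	left part used
 *	RE_FIT_TYPE:	|.............|<-- request -->|	right part used
 *	NE_FIT_TYPE:	|....|<-- request -->|........|	middle, VA is split
 */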
904 static __always_inline enum fit_type
905 classify_va_fit_type(struct vmap_area *va,
906 unsigned long nva_start_addr, unsigned long size)
910 /* Check if it is within VA. */
911 if (nva_start_addr < va->va_start ||
912 nva_start_addr + size > va->va_end)
916 if (va->va_start == nva_start_addr) {
917 if (va->va_end == nva_start_addr + size)
921 } else if (va->va_end == nva_start_addr + size) {
930 static __always_inline int
931 adjust_va_to_fit_type(struct vmap_area *va,
932 unsigned long nva_start_addr, unsigned long size,
935 struct vmap_area *lva = NULL;
937 if (type == FL_FIT_TYPE) {
939 * No need to split VA, it fully fits.
945 unlink_va(va, &free_vmap_area_root);
946 kmem_cache_free(vmap_area_cachep, va);
947 } else if (type == LE_FIT_TYPE) {
949 * Split left edge of fit VA.
955 va->va_start += size;
956 } else if (type == RE_FIT_TYPE) {
958 * Split right edge of fit VA.
964 va->va_end = nva_start_addr;
965 } else if (type == NE_FIT_TYPE) {
967 * Split no edge of fit VA.
973 lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
974 if (unlikely(!lva)) {
976 * For the percpu allocator we do not do any pre-allocation
977 * and leave it as is. The reason is that it most likely
978 * never ends up with NE_FIT_TYPE splitting. In the case of
979 * percpu allocations, offsets and sizes are aligned to a
980 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
981 * are its main fitting cases.
983 * There are a few exceptions though; one example is the
984 * first allocation (early boot up) when we have "one"
985 * big free space that has to be split.
987 * Also we can hit this path in the case of regular "vmap"
988 * allocations, if "this" current CPU was not preloaded.
989 * See the comment in alloc_vmap_area() for why. If so, then
990 * GFP_NOWAIT is used instead to get an extra object for
991 * split purposes. That is rare and most of the time does not occur.
994 * What happens if an allocation fails? Basically, an
995 * "overflow" path is triggered to purge lazily freed
996 * areas to free some memory, then the "retry" path is
997 * triggered to repeat one more time. See more details
998 * in the alloc_vmap_area() function.
1000 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1006 * Build the remainder.
1008 lva->va_start = va->va_start;
1009 lva->va_end = nva_start_addr;
1012 * Shrink this VA to remaining size.
1014 va->va_start = nva_start_addr + size;
1019 if (type != FL_FIT_TYPE) {
1020 augment_tree_propagate_from(va);
1022 if (lva) /* type == NE_FIT_TYPE */
1023 insert_vmap_area_augment(lva, &va->rb_node,
1024 &free_vmap_area_root, &free_vmap_area_list);
1031 * Returns the start address of the newly allocated area on success.
1032 * Otherwise, vend is returned to indicate failure.
1034 static __always_inline unsigned long
1035 __alloc_vmap_area(unsigned long size, unsigned long align,
1036 unsigned long vstart, unsigned long vend)
1038 unsigned long nva_start_addr;
1039 struct vmap_area *va;
1043 va = find_vmap_lowest_match(size, align, vstart);
1047 if (va->va_start > vstart)
1048 nva_start_addr = ALIGN(va->va_start, align);
1050 nva_start_addr = ALIGN(vstart, align);
1052 /* Check the "vend" restriction. */
1053 if (nva_start_addr + size > vend)
1056 /* Classify what we have found. */
1057 type = classify_va_fit_type(va, nva_start_addr, size);
1058 if (WARN_ON_ONCE(type == NOTHING_FIT))
1061 /* Update the free vmap_area. */
1062 ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1066 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1067 find_vmap_lowest_match_check(size);
1070 return nva_start_addr;
1074 * Free a region of KVA allocated by alloc_vmap_area
1076 static void free_vmap_area(struct vmap_area *va)
1079 * Remove from the busy tree/list.
1081 spin_lock(&vmap_area_lock);
1082 unlink_va(va, &vmap_area_root);
1083 spin_unlock(&vmap_area_lock);
1086 * Insert/Merge it back to the free tree/list.
1088 spin_lock(&free_vmap_area_lock);
1089 merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
1090 spin_unlock(&free_vmap_area_lock);
1094 * Allocate a region of KVA of the specified size and alignment, within the vstart and vend range.
1097 static struct vmap_area *alloc_vmap_area(unsigned long size,
1098 unsigned long align,
1099 unsigned long vstart, unsigned long vend,
1100 int node, gfp_t gfp_mask)
1102 struct vmap_area *va, *pva;
1108 BUG_ON(offset_in_page(size));
1109 BUG_ON(!is_power_of_2(align));
1111 if (unlikely(!vmap_initialized))
1112 return ERR_PTR(-EBUSY);
1115 gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1117 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1119 return ERR_PTR(-ENOMEM);
1122 * Only scan the relevant parts containing pointers to other objects
1123 * to avoid false negatives.
1125 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1129 * Preload this CPU with one extra vmap_area object. It is used
1130 * when the fit type of a free area is NE_FIT_TYPE. Please note, it
1131 * does not guarantee that an allocation occurs on a CPU that
1132 * is preloaded; instead we minimize the cases when it is not.
1133 * That can happen because of CPU migration, because there is a
1134 * race until the spinlock below is taken.
1136 * The preload is done in a non-atomic context, thus it allows us
1137 * to use more permissive allocation masks and to be more stable under
1138 * low-memory conditions and high memory pressure. In the rare case
1139 * when we are not preloaded, GFP_NOWAIT is used.
1141 * "pva" is set to NULL here because of the "retry" path.
1145 if (!this_cpu_read(ne_fit_preload_node))
1147 * Even if it fails we do not really care about that.
1148 * Just proceed as is. If needed, the "overflow" path
1149 * will refill the cache we allocate from.
1151 pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1153 spin_lock(&free_vmap_area_lock);
1155 if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
1156 kmem_cache_free(vmap_area_cachep, pva);
1159 * If an allocation fails, the "vend" address is
1160 * returned. Therefore trigger the overflow path.
1162 addr = __alloc_vmap_area(size, align, vstart, vend);
1163 spin_unlock(&free_vmap_area_lock);
1165 if (unlikely(addr == vend))
1168 va->va_start = addr;
1169 va->va_end = addr + size;
1173 spin_lock(&vmap_area_lock);
1174 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1175 spin_unlock(&vmap_area_lock);
1177 BUG_ON(!IS_ALIGNED(va->va_start, align));
1178 BUG_ON(va->va_start < vstart);
1179 BUG_ON(va->va_end > vend);
1181 ret = kasan_populate_vmalloc(addr, size);
1184 return ERR_PTR(ret);
1191 purge_vmap_area_lazy();
1196 if (gfpflags_allow_blocking(gfp_mask)) {
1197 unsigned long freed = 0;
1198 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1205 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1206 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1209 kmem_cache_free(vmap_area_cachep, va);
1210 return ERR_PTR(-EBUSY);
1213 int register_vmap_purge_notifier(struct notifier_block *nb)
1215 return blocking_notifier_chain_register(&vmap_notify_list, nb);
1217 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1219 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1221 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1223 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
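/*
 * Illustrative sketch (hypothetical usage, not an in-tree caller): a
 * subsystem that caches vmalloc'd buffers can hook the purge path and
 * drop its caches when vmap space runs low. The names below are made up;
 * the callback adds the number of pages it released to *data.
 *
 *	static int my_vmap_purge(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_cache_shrink();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vmap_nb = {
 *		.notifier_call = my_vmap_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&my_vmap_nb);
 *	...
 *	unregister_vmap_purge_notifier(&my_vmap_nb);
 */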
1226 * Clear the pagetable entries of a given vmap_area
1228 static void unmap_vmap_area(struct vmap_area *va)
1230 vunmap_page_range(va->va_start, va->va_end);
1234 * lazy_max_pages is the maximum amount of virtual address space we gather up
1235 * before attempting to purge with a TLB flush.
1237 * There is a tradeoff here: a larger number will cover more kernel page tables
1238 * and take slightly longer to purge, but it will linearly reduce the number of
1239 * global TLB flushes that must be performed. It would seem natural to scale
1240 * this number up linearly with the number of CPUs (because vmapping activity
1241 * could also scale linearly with the number of CPUs), however it is likely
1242 * that in practice, workloads might be constrained in other ways that mean
1243 * vmap activity will not scale linearly with CPUs. Also, I want to be
1244 * conservative and not introduce a big latency on huge systems, so go with
1245 * a less aggressive log scale. It will still be an improvement over the old
1246 * code, and it will be simple to change the scale factor if we find that it
1247 * becomes a problem on bigger systems.
1249 static unsigned long lazy_max_pages(void)
1253 log = fls(num_online_cpus());
1255 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
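/*
 * Worked example (assuming 4 KiB pages): with 16 online CPUs,
 * fls(16) == 5, so lazy_max_pages() returns 5 * (32 MiB / 4 KiB) =
 * 40960 pages, i.e. up to ~160 MiB of lazily freed vmap space may
 * accumulate before a purge is attempted.
 */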
1258 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1261 * Serialize vmap purging. There is no actual critical section protected
1262 * by this lock, but we want to avoid concurrent calls for performance
1263 * reasons and to make pcpu_get_vm_areas() more deterministic.
1265 static DEFINE_MUTEX(vmap_purge_lock);
1267 /* for per-CPU blocks */
1268 static void purge_fragmented_blocks_allcpus(void);
1271 * called before a call to iounmap() if the caller wants vm_area_struct's
1272 * immediately freed.
1274 void set_iounmap_nonlazy(void)
1276 atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1280 * Purges all lazily-freed vmap areas.
1282 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1284 unsigned long resched_threshold;
1285 struct llist_node *valist;
1286 struct vmap_area *va;
1287 struct vmap_area *n_va;
1289 lockdep_assert_held(&vmap_purge_lock);
1291 valist = llist_del_all(&vmap_purge_list);
1292 if (unlikely(valist == NULL))
1296 * First make sure the mappings are removed from all page-tables
1297 * before they are freed.
1299 vmalloc_sync_unmappings();
1302 * TODO: calculate a flush range without looping.
1303 * The list can be up to lazy_max_pages() elements.
1305 llist_for_each_entry(va, valist, purge_list) {
1306 if (va->va_start < start)
1307 start = va->va_start;
1308 if (va->va_end > end)
1312 flush_tlb_kernel_range(start, end);
1313 resched_threshold = lazy_max_pages() << 1;
1315 spin_lock(&free_vmap_area_lock);
1316 llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1317 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1318 unsigned long orig_start = va->va_start;
1319 unsigned long orig_end = va->va_end;
1322 * Finally insert or merge lazily-freed area. It is
1323 * detached and there is no need to "unlink" it from anything.
1326 va = merge_or_add_vmap_area(va, &free_vmap_area_root,
1327 &free_vmap_area_list);
1329 if (is_vmalloc_or_module_addr((void *)orig_start))
1330 kasan_release_vmalloc(orig_start, orig_end,
1331 va->va_start, va->va_end);
1333 atomic_long_sub(nr, &vmap_lazy_nr);
1335 if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1336 cond_resched_lock(&free_vmap_area_lock);
1338 spin_unlock(&free_vmap_area_lock);
1343 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1344 * is already purging.
1346 static void try_purge_vmap_area_lazy(void)
1348 if (mutex_trylock(&vmap_purge_lock)) {
1349 __purge_vmap_area_lazy(ULONG_MAX, 0);
1350 mutex_unlock(&vmap_purge_lock);
1355 * Kick off a purge of the outstanding lazy areas.
1357 static void purge_vmap_area_lazy(void)
1359 mutex_lock(&vmap_purge_lock);
1360 purge_fragmented_blocks_allcpus();
1361 __purge_vmap_area_lazy(ULONG_MAX, 0);
1362 mutex_unlock(&vmap_purge_lock);
1366 * Free a vmap area, the caller ensuring that the area has been unmapped
1367 * and that flush_cache_vunmap() has been called for the correct range previously.
1370 static void free_vmap_area_noflush(struct vmap_area *va)
1372 unsigned long nr_lazy;
1374 spin_lock(&vmap_area_lock);
1375 unlink_va(va, &vmap_area_root);
1376 spin_unlock(&vmap_area_lock);
1378 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1379 PAGE_SHIFT, &vmap_lazy_nr);
1381 /* After this point, we may free va at any time */
1382 llist_add(&va->purge_list, &vmap_purge_list);
1384 if (unlikely(nr_lazy > lazy_max_pages()))
1385 try_purge_vmap_area_lazy();
1389 * Free and unmap a vmap area
1391 static void free_unmap_vmap_area(struct vmap_area *va)
1393 flush_cache_vunmap(va->va_start, va->va_end);
1394 unmap_vmap_area(va);
1395 if (debug_pagealloc_enabled_static())
1396 flush_tlb_kernel_range(va->va_start, va->va_end);
1398 free_vmap_area_noflush(va);
1401 static struct vmap_area *find_vmap_area(unsigned long addr)
1403 struct vmap_area *va;
1405 spin_lock(&vmap_area_lock);
1406 va = __find_vmap_area(addr);
1407 spin_unlock(&vmap_area_lock);
1412 /*** Per cpu kva allocator ***/
1415 * vmap space is limited especially on 32 bit architectures. Ensure there is
1416 * room for at least 16 percpu vmap blocks per CPU.
1419 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1420 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1421 * instead (we just need a rough idea)
1423 #if BITS_PER_LONG == 32
1424 #define VMALLOC_SPACE (128UL*1024*1024)
1426 #define VMALLOC_SPACE (128UL*1024*1024*1024)
1429 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
1430 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
1431 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
1432 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
1433 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
1434 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
1435 #define VMAP_BBMAP_BITS \
1436 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
1437 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
1438 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1440 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
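/*
 * Worked example (assuming a 64-bit kernel, 4 KiB pages and
 * NR_CPUS == 64): VMALLOC_PAGES is 128 GiB / 4 KiB = 32M pages, so
 * VMALLOC_PAGES / 64 / 16 = 32768, which VMAP_MIN() clamps down to
 * VMAP_BBMAP_BITS_MAX (1024). A vmap block then spans
 * 1024 * 4 KiB = 4 MiB of address space (VMAP_BLOCK_SIZE).
 */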
1442 struct vmap_block_queue {
1444 struct list_head free;
1449 struct vmap_area *va;
1450 unsigned long free, dirty;
1451 unsigned long dirty_min, dirty_max; /*< dirty range */
1452 struct list_head free_list;
1453 struct rcu_head rcu_head;
1454 struct list_head purge;
1457 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1458 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1461 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
1462 * in the free path. Could get rid of this if we change the API to return a
1463 * "cookie" from alloc, to be passed to free. But no big deal yet.
1465 static DEFINE_SPINLOCK(vmap_block_tree_lock);
1466 static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
1469 * We should probably have a fallback mechanism to allocate virtual memory
1470 * out of partially filled vmap blocks. However vmap block sizing should be
1471 * fairly reasonable according to the vmalloc size, so it shouldn't be a problem.
1475 static unsigned long addr_to_vb_idx(unsigned long addr)
1477 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1478 addr /= VMAP_BLOCK_SIZE;
1482 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1486 addr = va_start + (pages_off << PAGE_SHIFT);
1487 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1488 return (void *)addr;
1492 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in this
1493 * block. Of course the number of pages can't exceed VMAP_BBMAP_BITS.
1494 * @order: how many 2^order pages should be occupied in the newly allocated block
1495 * @gfp_mask: flags for the page level allocator
1497 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1499 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1501 struct vmap_block_queue *vbq;
1502 struct vmap_block *vb;
1503 struct vmap_area *va;
1504 unsigned long vb_idx;
1508 node = numa_node_id();
1510 vb = kmalloc_node(sizeof(struct vmap_block),
1511 gfp_mask & GFP_RECLAIM_MASK, node);
1513 return ERR_PTR(-ENOMEM);
1515 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1516 VMALLOC_START, VMALLOC_END,
1520 return ERR_CAST(va);
1523 err = radix_tree_preload(gfp_mask);
1524 if (unlikely(err)) {
1527 return ERR_PTR(err);
1530 vaddr = vmap_block_vaddr(va->va_start, 0);
1531 spin_lock_init(&vb->lock);
1533 /* At least something should be left free */
1534 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1535 vb->free = VMAP_BBMAP_BITS - (1UL << order);
1537 vb->dirty_min = VMAP_BBMAP_BITS;
1539 INIT_LIST_HEAD(&vb->free_list);
1541 vb_idx = addr_to_vb_idx(va->va_start);
1542 spin_lock(&vmap_block_tree_lock);
1543 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
1544 spin_unlock(&vmap_block_tree_lock);
1546 radix_tree_preload_end();
1548 vbq = &get_cpu_var(vmap_block_queue);
1549 spin_lock(&vbq->lock);
1550 list_add_tail_rcu(&vb->free_list, &vbq->free);
1551 spin_unlock(&vbq->lock);
1552 put_cpu_var(vmap_block_queue);
1557 static void free_vmap_block(struct vmap_block *vb)
1559 struct vmap_block *tmp;
1560 unsigned long vb_idx;
1562 vb_idx = addr_to_vb_idx(vb->va->va_start);
1563 spin_lock(&vmap_block_tree_lock);
1564 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
1565 spin_unlock(&vmap_block_tree_lock);
1568 free_vmap_area_noflush(vb->va);
1569 kfree_rcu(vb, rcu_head);
1572 static void purge_fragmented_blocks(int cpu)
1575 struct vmap_block *vb;
1576 struct vmap_block *n_vb;
1577 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1580 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1582 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1585 spin_lock(&vb->lock);
1586 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1587 vb->free = 0; /* prevent further allocs after releasing lock */
1588 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1590 vb->dirty_max = VMAP_BBMAP_BITS;
1591 spin_lock(&vbq->lock);
1592 list_del_rcu(&vb->free_list);
1593 spin_unlock(&vbq->lock);
1594 spin_unlock(&vb->lock);
1595 list_add_tail(&vb->purge, &purge);
1597 spin_unlock(&vb->lock);
1601 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1602 list_del(&vb->purge);
1603 free_vmap_block(vb);
1607 static void purge_fragmented_blocks_allcpus(void)
1611 for_each_possible_cpu(cpu)
1612 purge_fragmented_blocks(cpu);
1615 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1617 struct vmap_block_queue *vbq;
1618 struct vmap_block *vb;
1622 BUG_ON(offset_in_page(size));
1623 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1624 if (WARN_ON(size == 0)) {
1626 * Allocating 0 bytes isn't what the caller wants since
1627 * get_order(0) returns a funny result. Just warn and terminate early.
1632 order = get_order(size);
1635 vbq = &get_cpu_var(vmap_block_queue);
1636 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1637 unsigned long pages_off;
1639 spin_lock(&vb->lock);
1640 if (vb->free < (1UL << order)) {
1641 spin_unlock(&vb->lock);
1645 pages_off = VMAP_BBMAP_BITS - vb->free;
1646 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1647 vb->free -= 1UL << order;
1648 if (vb->free == 0) {
1649 spin_lock(&vbq->lock);
1650 list_del_rcu(&vb->free_list);
1651 spin_unlock(&vbq->lock);
1654 spin_unlock(&vb->lock);
1658 put_cpu_var(vmap_block_queue);
1661 /* Allocate new block if nothing was found */
1663 vaddr = new_vmap_block(order, gfp_mask);
1668 static void vb_free(const void *addr, unsigned long size)
1670 unsigned long offset;
1671 unsigned long vb_idx;
1673 struct vmap_block *vb;
1675 BUG_ON(offset_in_page(size));
1676 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1678 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1680 order = get_order(size);
1682 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1683 offset >>= PAGE_SHIFT;
1685 vb_idx = addr_to_vb_idx((unsigned long)addr);
1687 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1691 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1693 if (debug_pagealloc_enabled_static())
1694 flush_tlb_kernel_range((unsigned long)addr,
1695 (unsigned long)addr + size);
1697 spin_lock(&vb->lock);
1699 /* Expand dirty range */
1700 vb->dirty_min = min(vb->dirty_min, offset);
1701 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1703 vb->dirty += 1UL << order;
1704 if (vb->dirty == VMAP_BBMAP_BITS) {
1706 spin_unlock(&vb->lock);
1707 free_vmap_block(vb);
1709 spin_unlock(&vb->lock);
1712 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1716 if (unlikely(!vmap_initialized))
1721 for_each_possible_cpu(cpu) {
1722 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1723 struct vmap_block *vb;
1726 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1727 spin_lock(&vb->lock);
1729 unsigned long va_start = vb->va->va_start;
1732 s = va_start + (vb->dirty_min << PAGE_SHIFT);
1733 e = va_start + (vb->dirty_max << PAGE_SHIFT);
1735 start = min(s, start);
1740 spin_unlock(&vb->lock);
1745 mutex_lock(&vmap_purge_lock);
1746 purge_fragmented_blocks_allcpus();
1747 if (!__purge_vmap_area_lazy(start, end) && flush)
1748 flush_tlb_kernel_range(start, end);
1749 mutex_unlock(&vmap_purge_lock);
1753 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1755 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1756 * to amortize TLB flushing overheads. What this means is that any page you
1757 * have now may, in a former life, have been mapped into a kernel virtual
1758 * address by the vmap layer, and so there might be some CPUs with TLB entries
1759 * still referencing that page (additional to the regular 1:1 kernel mapping).
1761 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1762 * be sure that none of the pages we have control over will have any aliases
1763 * from the vmap layer.
1765 void vm_unmap_aliases(void)
1767 unsigned long start = ULONG_MAX, end = 0;
1770 _vm_unmap_aliases(start, end, flush);
1772 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1775 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1776 * @mem: the pointer returned by vm_map_ram
1777 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1779 void vm_unmap_ram(const void *mem, unsigned int count)
1781 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1782 unsigned long addr = (unsigned long)mem;
1783 struct vmap_area *va;
1787 BUG_ON(addr < VMALLOC_START);
1788 BUG_ON(addr > VMALLOC_END);
1789 BUG_ON(!PAGE_ALIGNED(addr));
1791 kasan_poison_vmalloc(mem, size);
1793 if (likely(count <= VMAP_MAX_ALLOC)) {
1794 debug_check_no_locks_freed(mem, size);
1799 va = find_vmap_area(addr);
1801 debug_check_no_locks_freed((void *)va->va_start,
1802 (va->va_end - va->va_start));
1803 free_unmap_vmap_area(va);
1805 EXPORT_SYMBOL(vm_unmap_ram);
1808 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1809 * @pages: an array of pointers to the pages to be mapped
1810 * @count: number of pages
1811 * @node: prefer to allocate data structures on this node
1812 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1814 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1815 * faster than vmap so it's good. But if you mix long-lived and short-lived
1816 * objects with vm_map_ram(), it could consume lots of address space through
1817 * fragmentation (especially on a 32-bit machine). You could see failures in
1818 * the end. Please use this function for short-lived objects.
1820 * Returns: a pointer to the address that has been mapped, or %NULL on failure
1822 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1824 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1828 if (likely(count <= VMAP_MAX_ALLOC)) {
1829 mem = vb_alloc(size, GFP_KERNEL);
1832 addr = (unsigned long)mem;
1834 struct vmap_area *va;
1835 va = alloc_vmap_area(size, PAGE_SIZE,
1836 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1840 addr = va->va_start;
1844 kasan_unpoison_vmalloc(mem, size);
1846 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1847 vm_unmap_ram(mem, count);
1852 EXPORT_SYMBOL(vm_map_ram);
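/*
 * Illustrative sketch (hypothetical usage, not an in-tree caller): the
 * intended pairing for short-lived mappings. "pages" is assumed to hold
 * "npages" valid page pointers and error handling is elided.
 *
 *	void *va = vm_map_ram(pages, npages, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (va) {
 *		... use the contiguous mapping at va ...
 *		vm_unmap_ram(va, npages);
 *	}
 */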
1854 static struct vm_struct *vmlist __initdata;
1857 * vm_area_add_early - add vmap area early during boot
1858 * @vm: vm_struct to add
1860 * This function is used to add fixed kernel vm area to vmlist before
1861 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1862 * should contain proper values and the other fields should be zero.
1864 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1866 void __init vm_area_add_early(struct vm_struct *vm)
1868 struct vm_struct *tmp, **p;
1870 BUG_ON(vmap_initialized);
1871 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1872 if (tmp->addr >= vm->addr) {
1873 BUG_ON(tmp->addr < vm->addr + vm->size);
1876 BUG_ON(tmp->addr + tmp->size > vm->addr);
1883 * vm_area_register_early - register vmap area early during boot
1884 * @vm: vm_struct to register
1885 * @align: requested alignment
1887 * This function is used to register kernel vm area before
1888 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1889 * proper values on entry and other fields should be zero. On return,
1890 * vm->addr contains the allocated address.
1892 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1894 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1896 static size_t vm_init_off __initdata;
1899 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1900 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1902 vm->addr = (void *)addr;
1904 vm_area_add_early(vm);
1907 static void vmap_init_free_space(void)
1909 unsigned long vmap_start = 1;
1910 const unsigned long vmap_end = ULONG_MAX;
1911 struct vmap_area *busy, *free;
1915 * -|-----|.....|-----|-----|-----|.....|-   (|-----| busy, |.....| free)
1917 * |<--------------------------------->|   (the whole KVA space)
1919 list_for_each_entry(busy, &vmap_area_list, list) {
1920 if (busy->va_start - vmap_start > 0) {
1921 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1922 if (!WARN_ON_ONCE(!free)) {
1923 free->va_start = vmap_start;
1924 free->va_end = busy->va_start;
1926 insert_vmap_area_augment(free, NULL,
1927 &free_vmap_area_root,
1928 &free_vmap_area_list);
1932 vmap_start = busy->va_end;
1935 if (vmap_end - vmap_start > 0) {
1936 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1937 if (!WARN_ON_ONCE(!free)) {
1938 free->va_start = vmap_start;
1939 free->va_end = vmap_end;
1941 insert_vmap_area_augment(free, NULL,
1942 &free_vmap_area_root,
1943 &free_vmap_area_list);
1948 void __init vmalloc_init(void)
1950 struct vmap_area *va;
1951 struct vm_struct *tmp;
1955 * Create the cache for vmap_area objects.
1957 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1959 for_each_possible_cpu(i) {
1960 struct vmap_block_queue *vbq;
1961 struct vfree_deferred *p;
1963 vbq = &per_cpu(vmap_block_queue, i);
1964 spin_lock_init(&vbq->lock);
1965 INIT_LIST_HEAD(&vbq->free);
1966 p = &per_cpu(vfree_deferred, i);
1967 init_llist_head(&p->list);
1968 INIT_WORK(&p->wq, free_work);
1971 /* Import existing vmlist entries. */
1972 for (tmp = vmlist; tmp; tmp = tmp->next) {
1973 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1974 if (WARN_ON_ONCE(!va))
1977 va->va_start = (unsigned long)tmp->addr;
1978 va->va_end = va->va_start + tmp->size;
1980 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1984 * Now we can initialize the free vmap space.
1986 vmap_init_free_space();
1987 vmap_initialized = true;
1991 * map_kernel_range_noflush - map kernel VM area with the specified pages
1992 * @addr: start of the VM area to map
1993 * @size: size of the VM area to map
1994 * @prot: page protection flags to use
1995 * @pages: pages to map
1997 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
1998 * specify should have been allocated using get_vm_area() and its friends.
2002 * This function does NOT do any cache flushing. The caller is
2003 * responsible for calling flush_cache_vmap() on to-be-mapped areas
2004 * before calling this function.
2007 * The number of pages mapped on success, -errno on failure.
2009 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
2010 pgprot_t prot, struct page **pages)
2012 return vmap_page_range_noflush(addr, addr + size, prot, pages);
2016 * unmap_kernel_range_noflush - unmap kernel VM area
2017 * @addr: start of the VM area to unmap
2018 * @size: size of the VM area to unmap
2020 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
2021 * specify should have been allocated using get_vm_area() and its friends.
2025 * This function does NOT do any cache flushing. The caller is
2026 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
2027 * before calling this function and flush_tlb_kernel_range() after.
2029 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
2031 vunmap_page_range(addr, addr + size);
2033 EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
2036 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2037 * @addr: start of the VM area to unmap
2038 * @size: size of the VM area to unmap
2040 * Similar to unmap_kernel_range_noflush() but flushes the vcache before
2041 * the unmapping and the TLB after.
2043 void unmap_kernel_range(unsigned long addr, unsigned long size)
2045 unsigned long end = addr + size;
2047 flush_cache_vunmap(addr, end);
2048 vunmap_page_range(addr, end);
2049 flush_tlb_kernel_range(addr, end);
2051 EXPORT_SYMBOL_GPL(unmap_kernel_range);
2053 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
2055 unsigned long addr = (unsigned long)area->addr;
2056 unsigned long end = addr + get_vm_area_size(area);
2059 err = vmap_page_range(addr, end, prot, pages);
2061 return err > 0 ? 0 : err;
2063 EXPORT_SYMBOL_GPL(map_vm_area);
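/*
 * Illustrative sketch (hypothetical usage, not an in-tree caller):
 * reserving vmalloc space first and populating it later. "pages" and
 * "npages" are assumed to exist; error handling is elided.
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(npages << PAGE_SHIFT, VM_MAP);
 *	if (area && map_vm_area(area, PAGE_KERNEL, pages) == 0)
 *		... area->addr now maps the "pages" array contiguously ...
 */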
2065 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2066 struct vmap_area *va, unsigned long flags, const void *caller)
2069 vm->addr = (void *)va->va_start;
2070 vm->size = va->va_end - va->va_start;
2071 vm->caller = caller;
2075 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2076 unsigned long flags, const void *caller)
2078 spin_lock(&vmap_area_lock);
2079 setup_vmalloc_vm_locked(vm, va, flags, caller);
2080 spin_unlock(&vmap_area_lock);
2083 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2086 * Before removing VM_UNINITIALIZED,
2087 * we should make sure that vm has proper values.
2088 * Pair with smp_rmb() in show_numa_info().
2091 vm->flags &= ~VM_UNINITIALIZED;
2094 static struct vm_struct *__get_vm_area_node(unsigned long size,
2095 unsigned long align, unsigned long flags, unsigned long start,
2096 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2098 struct vmap_area *va;
2099 struct vm_struct *area;
2100 unsigned long requested_size = size;
2102 BUG_ON(in_interrupt());
2103 size = PAGE_ALIGN(size);
2104 if (unlikely(!size))
2107 if (flags & VM_IOREMAP)
2108 align = 1ul << clamp_t(int, get_count_order_long(size),
2109 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2111 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2112 if (unlikely(!area))
2115 if (!(flags & VM_NO_GUARD))
2118 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2124 kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2126 setup_vmalloc_vm(area, va, flags, caller);
2131 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2132 unsigned long start, unsigned long end)
2134 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2135 GFP_KERNEL, __builtin_return_address(0));
2137 EXPORT_SYMBOL_GPL(__get_vm_area);
2139 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2140 unsigned long start, unsigned long end,
2143 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2144 GFP_KERNEL, caller);
2148 * get_vm_area - reserve a contiguous kernel virtual area
2149 * @size: size of the area
2150 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
2152 * Search for an area of @size in the kernel virtual mapping area,
2153 * and reserve it for our purposes. Returns the area descriptor
2154 * on success or %NULL on failure.
2156 * Return: the area descriptor on success or %NULL on failure.
2158 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2160 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2161 NUMA_NO_NODE, GFP_KERNEL,
2162 __builtin_return_address(0));
2165 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2168 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2169 NUMA_NO_NODE, GFP_KERNEL, caller);
2173 * find_vm_area - find a continuous kernel virtual area
2174 * @addr: base address
2176 * Search for the kernel VM area starting at @addr, and return it.
2177 * It is up to the caller to do all required locking to keep the returned pointer valid.
2180 * Return: pointer to the found area or %NULL on failure
2182 struct vm_struct *find_vm_area(const void *addr)
2184 struct vmap_area *va;
2186 va = find_vmap_area((unsigned long)addr);
2194 * remove_vm_area - find and remove a continuous kernel virtual area
2195 * @addr: base address
2197 * Search for the kernel VM area starting at @addr, and remove it.
2198 * This function returns the found VM area, but using it is NOT safe
2199 * on SMP machines, except for its size or flags.
2201 * Return: pointer to the found area or %NULL on failure
2203 struct vm_struct *remove_vm_area(const void *addr)
2205 struct vmap_area *va;
2209 spin_lock(&vmap_area_lock);
2210 va = __find_vmap_area((unsigned long)addr);
2212 struct vm_struct *vm = va->vm;
2215 spin_unlock(&vmap_area_lock);
2217 kasan_free_shadow(vm);
2218 free_unmap_vmap_area(va);
2223 spin_unlock(&vmap_area_lock);
2227 static inline void set_area_direct_map(const struct vm_struct *area,
2228 int (*set_direct_map)(struct page *page))
2232 for (i = 0; i < area->nr_pages; i++)
2233 if (page_address(area->pages[i]))
2234 set_direct_map(area->pages[i]);
2237 /* Handle removing and resetting vm mappings related to the vm_struct. */
2238 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2240 unsigned long start = ULONG_MAX, end = 0;
2241 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2245 remove_vm_area(area->addr);
2247 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2252 * If not deallocating pages, just do the flush of the VM area and return.
2255 if (!deallocate_pages) {
2261 * If execution gets here, flush the vm mapping and reset the direct
2262 * map. Find the start and end range of the direct mappings to make sure
2263 * the vm_unmap_aliases() flush includes the direct map.
2265 for (i = 0; i < area->nr_pages; i++) {
2266 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2268 start = min(addr, start);
2269 end = max(addr + PAGE_SIZE, end);
2275 * Set direct map to something invalid so that it won't be cached if
2276 * there are any accesses after the TLB flush, then flush the TLB and
2277 * reset the direct map permissions to the default.
2279 set_area_direct_map(area, set_direct_map_invalid_noflush);
2280 _vm_unmap_aliases(start, end, flush_dmap);
2281 set_area_direct_map(area, set_direct_map_default_noflush);
2284 static void __vunmap(const void *addr, int deallocate_pages)
2286 struct vm_struct *area;
2291 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2295 area = find_vm_area(addr);
2296 if (unlikely(!area)) {
2297 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2302 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2303 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2305 kasan_poison_vmalloc(area->addr, area->size);
2307 vm_remove_mappings(area, deallocate_pages);
2309 if (deallocate_pages) {
2312 for (i = 0; i < area->nr_pages; i++) {
2313 struct page *page = area->pages[i];
2316 __free_pages(page, 0);
2318 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2320 kvfree(area->pages);
2327 static inline void __vfree_deferred(const void *addr)
2330 * Use raw_cpu_ptr() because this can be called from preemptible
2331 * context. Preemption is absolutely fine here, because the llist_add()
2332 * implementation is lockless, so it works even if we are adding to
2333 * another cpu's list. schedule_work() should be fine with this too.
2335 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2337 if (llist_add((struct llist_node *)addr, &p->list))
2338 schedule_work(&p->wq);
2342 * vfree_atomic - release memory allocated by vmalloc()
2343 * @addr: memory base address
2345 * This one is just like vfree() but can be called in any atomic context except NMIs.
2348 void vfree_atomic(const void *addr)
2352 kmemleak_free(addr);
2356 __vfree_deferred(addr);
2359 static void __vfree(const void *addr)
2361 if (unlikely(in_interrupt()))
2362 __vfree_deferred(addr);
2368 * vfree - release memory allocated by vmalloc()
2369 * @addr: memory base address
2371 * Free the virtually contiguous memory area starting at @addr, as
2372 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2373 * NULL, no operation is performed.
2375 * Must not be called in NMI context (strictly speaking, only if we don't
2376 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2377 * conventions for vfree() arch-dependent would be a really bad idea)
2379 * May sleep if called *not* from interrupt context.
2381 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2383 void vfree(const void *addr)
2387 kmemleak_free(addr);
2389 might_sleep_if(!in_interrupt());
2396 EXPORT_SYMBOL(vfree);
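/*
 * A minimal usage sketch (the helper names below are hypothetical and not
 * part of this file): in process context a plain vfree() is fine and may
 * sleep, while code running in atomic context can use vfree_atomic() to
 * make the deferred free explicit. vfree(NULL) is a no-op, so no NULL
 * check is needed.
 */
static void example_release_buf(void *buf)
{
	vfree(buf);		/* process context: may sleep */
}

static void example_release_buf_atomic(void *buf)
{
	vfree_atomic(buf);	/* e.g. from a timer or IRQ handler */
}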
2399 * vunmap - release virtual mapping obtained by vmap()
2400 * @addr: memory base address
2402 * Free the virtually contiguous memory area starting at @addr,
2403 * which was created from the page array passed to vmap().
2405 * Must not be called in interrupt context.
2407 void vunmap(const void *addr)
2409 BUG_ON(in_interrupt());
2414 EXPORT_SYMBOL(vunmap);
2417 * vmap - map an array of pages into virtually contiguous space
2418 * @pages: array of page pointers
2419 * @count: number of pages to map
2420 * @flags: vm_area->flags
2421 * @prot: page protection for the mapping
2423 * Maps @count pages from @pages into contiguous kernel virtual space.
2426 * Return: the address of the area or %NULL on failure
2428 void *vmap(struct page **pages, unsigned int count,
2429 unsigned long flags, pgprot_t prot)
2431 struct vm_struct *area;
2432 unsigned long size; /* In bytes */
2436 if (count > totalram_pages())
2439 size = (unsigned long)count << PAGE_SHIFT;
2440 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2444 if (map_vm_area(area, prot, pages)) {
2451 EXPORT_SYMBOL(vmap);
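/*
 * A minimal usage sketch (hypothetical helpers, not part of this file):
 * stitch four separately allocated pages into one contiguous kernel
 * virtual range with vmap(), then undo it. vunmap() only removes the
 * mapping; the backing pages still have to be freed by the caller.
 */
static void *example_map_four_pages(struct page **pages)
{
	return vmap(pages, 4, VM_MAP, PAGE_KERNEL);
}

static void example_unmap_four_pages(void *vaddr, struct page **pages)
{
	int i;

	vunmap(vaddr);		/* must not be called from interrupt context */
	for (i = 0; i < 4; i++)
		__free_page(pages[i]);
}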
2453 static void *__vmalloc_node(unsigned long size, unsigned long align,
2454 gfp_t gfp_mask, pgprot_t prot,
2455 int node, const void *caller);
2456 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2457 pgprot_t prot, int node)
2459 struct page **pages;
2460 unsigned int nr_pages, array_size, i;
2461 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2462 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2463 const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2467 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2468 array_size = (nr_pages * sizeof(struct page *));
2470 /* Please note that the recursion is strictly bounded. */
2471 if (array_size > PAGE_SIZE) {
2472 pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2473 PAGE_KERNEL, node, area->caller);
2475 pages = kmalloc_node(array_size, nested_gfp, node);
2479 remove_vm_area(area->addr);
2484 area->pages = pages;
2485 area->nr_pages = nr_pages;
2487 for (i = 0; i < area->nr_pages; i++) {
2490 if (node == NUMA_NO_NODE)
2491 page = alloc_page(alloc_mask|highmem_mask);
2493 page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2495 if (unlikely(!page)) {
2496 /* Successfully allocated i pages, free them in __vunmap() */
2498 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2501 area->pages[i] = page;
2502 if (gfpflags_allow_blocking(gfp_mask))
2505 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2507 if (map_vm_area(area, prot, pages))
2512 warn_alloc(gfp_mask, NULL,
2513 "vmalloc: allocation failure, allocated %ld of %ld bytes",
2514 (area->nr_pages*PAGE_SIZE), area->size);
2515 __vfree(area->addr);
2520 * __vmalloc_node_range - allocate virtually contiguous memory
2521 * @size: allocation size
2522 * @align: desired alignment
2523 * @start: vm area range start
2524 * @end: vm area range end
2525 * @gfp_mask: flags for the page level allocator
2526 * @prot: protection mask for the allocated pages
2527 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2528 * @node: node to use for allocation or NUMA_NO_NODE
2529 * @caller: caller's return address
2531 * Allocate enough pages to cover @size from the page level
2532 * allocator with @gfp_mask flags. Map them into contiguous
2533 * kernel virtual space, using a pagetable protection of @prot.
2535 * Return: the address of the area or %NULL on failure
2537 void *__vmalloc_node_range(unsigned long size, unsigned long align,
2538 unsigned long start, unsigned long end, gfp_t gfp_mask,
2539 pgprot_t prot, unsigned long vm_flags, int node,
2542 struct vm_struct *area;
2544 unsigned long real_size = size;
2546 size = PAGE_ALIGN(size);
2547 if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2550 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
2551 vm_flags, start, end, node, gfp_mask, caller);
2555 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2560 * In this function, the newly allocated vm_struct has the
2561 * VM_UNINITIALIZED flag, meaning it is not fully initialized.
2562 * Now that it is, clear the flag here.
2564 clear_vm_uninitialized_flag(area);
2566 kmemleak_vmalloc(area, size, gfp_mask);
2571 warn_alloc(gfp_mask, NULL,
2572 "vmalloc: allocation failure: %lu bytes", real_size);
2577 * This is only for performance analysis of vmalloc and for stress purposes.
2578 * It is required by the vmalloc test module; do not use it for anything else.
2581 #ifdef CONFIG_TEST_VMALLOC_MODULE
2582 EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2586 * __vmalloc_node - allocate virtually contiguous memory
2587 * @size: allocation size
2588 * @align: desired alignment
2589 * @gfp_mask: flags for the page level allocator
2590 * @prot: protection mask for the allocated pages
2591 * @node: node to use for allocation or NUMA_NO_NODE
2592 * @caller: caller's return address
2594 * Allocate enough pages to cover @size from the page level
2595 * allocator with @gfp_mask flags. Map them into contiguous
2596 * kernel virtual space, using a pagetable protection of @prot.
2598 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2599 * and __GFP_NOFAIL are not supported
2601 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the mm people first.
2604 * Return: pointer to the allocated memory or %NULL on error
2606 static void *__vmalloc_node(unsigned long size, unsigned long align,
2607 gfp_t gfp_mask, pgprot_t prot,
2608 int node, const void *caller)
2610 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2611 gfp_mask, prot, 0, node, caller);
2614 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
2616 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
2617 __builtin_return_address(0));
2619 EXPORT_SYMBOL(__vmalloc);
2621 static inline void *__vmalloc_node_flags(unsigned long size,
2622 int node, gfp_t flags)
2624 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2625 node, __builtin_return_address(0));
2629 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2632 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2636 * vmalloc - allocate virtually contiguous memory
2637 * @size: allocation size
2639 * Allocate enough pages to cover @size from the page level
2640 * allocator and map them into contiguous kernel virtual space.
2642 * For tight control over page level allocator and protection flags
2643 * use __vmalloc() instead.
2645 * Return: pointer to the allocated memory or %NULL on error
2647 void *vmalloc(unsigned long size)
2649 return __vmalloc_node_flags(size, NUMA_NO_NODE,
2652 EXPORT_SYMBOL(vmalloc);
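/*
 * A minimal usage sketch (hypothetical helper, not part of this file):
 * a large scratch buffer that only needs to be virtually contiguous.
 * The allocation may sleep and may fail, so the result must be checked;
 * the matching release is vfree().
 */
static int example_with_scratch(unsigned long nbytes)
{
	void *scratch = vmalloc(nbytes);

	if (!scratch)
		return -ENOMEM;

	memset(scratch, 0xff, nbytes);	/* stand-in for real work on the buffer */
	vfree(scratch);
	return 0;
}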
2655 * vzalloc - allocate virtually contiguous memory with zero fill
2656 * @size: allocation size
2658 * Allocate enough pages to cover @size from the page level
2659 * allocator and map them into contiguous kernel virtual space.
2660 * The memory allocated is set to zero.
2662 * For tight control over page level allocator and protection flags
2663 * use __vmalloc() instead.
2665 * Return: pointer to the allocated memory or %NULL on error
2667 void *vzalloc(unsigned long size)
2669 return __vmalloc_node_flags(size, NUMA_NO_NODE,
2670 GFP_KERNEL | __GFP_ZERO);
2672 EXPORT_SYMBOL(vzalloc);
2675 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2676 * @size: allocation size
2678 * The resulting memory area is zeroed so it can be mapped to userspace
2679 * without leaking data.
2681 * Return: pointer to the allocated memory or %NULL on error
2683 void *vmalloc_user(unsigned long size)
2685 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2686 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2687 VM_USERMAP, NUMA_NO_NODE,
2688 __builtin_return_address(0));
2690 EXPORT_SYMBOL(vmalloc_user);
2693 * vmalloc_node - allocate memory on a specific node
2694 * @size: allocation size
2697 * Allocate enough pages to cover @size from the page level
2698 * allocator and map them into contiguous kernel virtual space.
2700 * For tight control over page level allocator and protection flags
2701 * use __vmalloc() instead.
2703 * Return: pointer to the allocated memory or %NULL on error
2705 void *vmalloc_node(unsigned long size, int node)
2707 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
2708 node, __builtin_return_address(0));
2710 EXPORT_SYMBOL(vmalloc_node);
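/*
 * A minimal usage sketch (hypothetical per-node table, not part of this
 * file; assumes <linux/nodemask.h> for for_each_online_node()): keep one
 * buffer per node, backed by that node's memory where possible. Error
 * unwinding is omitted for brevity.
 */
static int example_alloc_per_node(void *bufs[], unsigned long size)
{
	int node;

	for_each_online_node(node) {
		bufs[node] = vmalloc_node(size, node);
		if (!bufs[node])
			return -ENOMEM;
	}
	return 0;
}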
2713 * vzalloc_node - allocate memory on a specific node with zero fill
2714 * @size: allocation size
2717 * Allocate enough pages to cover @size from the page level
2718 * allocator and map them into contiguous kernel virtual space.
2719 * The memory allocated is set to zero.
2721 * For tight control over page level allocator and protection flags
2722 * use __vmalloc_node() instead.
2724 * Return: pointer to the allocated memory or %NULL on error
2726 void *vzalloc_node(unsigned long size, int node)
2728 return __vmalloc_node_flags(size, node,
2729 GFP_KERNEL | __GFP_ZERO);
2731 EXPORT_SYMBOL(vzalloc_node);
2734 * vmalloc_user_node_flags - allocate memory for userspace on a specific node
2735 * @size: allocation size
2737 * @flags: flags for the page level allocator
2739 * The resulting memory area is zeroed so it can be mapped to userspace
2740 * without leaking data.
2742 * Return: pointer to the allocated memory or %NULL on error
2744 void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags)
2746 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2747 flags | __GFP_ZERO, PAGE_KERNEL,
2749 __builtin_return_address(0));
2751 EXPORT_SYMBOL(vmalloc_user_node_flags);
2754 * vmalloc_exec - allocate virtually contiguous, executable memory
2755 * @size: allocation size
2757 * Kernel-internal function to allocate enough pages to cover @size from
2758 * the page level allocator and map them into contiguous and
2759 * executable kernel virtual space.
2761 * For tight control over page level allocator and protection flags
2762 * use __vmalloc() instead.
2764 * Return: pointer to the allocated memory or %NULL on error
2766 void *vmalloc_exec(unsigned long size)
2768 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2769 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2770 NUMA_NO_NODE, __builtin_return_address(0));
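/*
 * A minimal usage sketch (hypothetical trampoline installer, not part of
 * this file; assumes <asm/cacheflush.h> for flush_icache_range()): the
 * VM_FLUSH_RESET_PERMS flag set by vmalloc_exec() means a later vfree()
 * will flush the mapping and restore the direct map permissions.
 */
static void *example_install_insns(const void *insns, unsigned long len)
{
	void *dst = vmalloc_exec(len);

	if (!dst)
		return NULL;

	memcpy(dst, insns, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
	return dst;		/* released later with vfree(dst) */
}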
2773 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2774 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2775 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2776 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2779 * 64-bit systems should always have either DMA or DMA32 zones. For the others,
2780 * GFP_DMA32 should do the right thing and use the normal zone.
2782 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2786 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2787 * @size: allocation size
2789 * Allocate enough 32-bit physically addressable pages to cover @size from the
2790 * page level allocator and map them into contiguous kernel virtual space.
2792 * Return: pointer to the allocated memory or %NULL on error
2794 void *vmalloc_32(unsigned long size)
2796 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2797 NUMA_NO_NODE, __builtin_return_address(0));
2799 EXPORT_SYMBOL(vmalloc_32);
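/*
 * A minimal usage sketch (hypothetical check, not part of this file;
 * assumes page_to_phys() is available in this context): every page
 * backing a vmalloc_32() buffer is 32-bit physically addressable, which
 * can be verified by walking the buffer with vmalloc_to_page(). The
 * buffer itself is still only virtually contiguous.
 */
static bool example_pages_below_4g(const void *buf, unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);

		if ((u64)page_to_phys(page) >= (1ULL << 32))
			return false;
	}
	return true;
}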
2802 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2803 * @size: allocation size
2805 * The resulting memory area is 32bit addressable and zeroed so it can be
2806 * mapped to userspace without leaking data.
2808 * Return: pointer to the allocated memory or %NULL on error
2810 void *vmalloc_32_user(unsigned long size)
2812 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2813 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2814 VM_USERMAP, NUMA_NO_NODE,
2815 __builtin_return_address(0));
2817 EXPORT_SYMBOL(vmalloc_32_user);
2820 * Small helper routine: copy contents from addr into buf.
2821 * If a page is not present, the corresponding bytes are zero-filled.
2824 static int aligned_vread(char *buf, char *addr, unsigned long count)
2830 unsigned long offset, length;
2832 offset = offset_in_page(addr);
2833 length = PAGE_SIZE - offset;
2836 p = vmalloc_to_page(addr);
2838 * To do safe access to this _mapped_ area, we need a
2839 * lock. But adding a lock here means adding overhead to
2840 * vmalloc()/vfree() calls for this _debug_
2841 * interface, which is rarely used. Instead of that, we'll use
2842 * kmap() and accept a small overhead in this access function.
2846 * we can expect USER0 is not used (see vread/vwrite's
2847 * function description)
2849 void *map = kmap_atomic(p);
2850 memcpy(buf, map + offset, length);
2853 memset(buf, 0, length);
2863 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2869 unsigned long offset, length;
2871 offset = offset_in_page(addr);
2872 length = PAGE_SIZE - offset;
2875 p = vmalloc_to_page(addr);
2877 * To do safe access to this _mapped_ area, we need a
2878 * lock. But adding a lock here means adding overhead to
2879 * vmalloc()/vfree() calls for this _debug_
2880 * interface, which is rarely used. Instead of that, we'll use
2881 * kmap() and accept a small overhead in this access function.
2885 * we can expect USER0 is not used (see vread/vwrite's
2886 * function description)
2888 void *map = kmap_atomic(p);
2889 memcpy(map + offset, buf, length);
2901 * vread() - read vmalloc area in a safe way.
2902 * @buf: buffer for reading data
2903 * @addr: vm address.
2904 * @count: number of bytes to be read.
2906 * This function checks that addr is a valid vmalloc'ed area, and
2907 * copies data from that area to the given buffer. If the given memory range
2908 * of [addr...addr+count) includes some valid address, data is copied to
2909 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2910 * IOREMAP areas are treated as memory holes and no copy is done.
2912 * If [addr...addr+count) doesn't include any intersection with a live
2913 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2915 * Note: In usual operations, vread() is never necessary because the caller
2916 * should know that the vmalloc() area is valid and can use memcpy().
2917 * This is for routines which have to access the vmalloc area without
2918 * any prior information, such as /dev/kmem.
2920 * Return: number of bytes for which addr and buf should be increased
2921 * (same number as @count) or %0 if [addr...addr+count) doesn't
2922 * include any intersection with valid vmalloc area
2924 long vread(char *buf, char *addr, unsigned long count)
2926 struct vmap_area *va;
2927 struct vm_struct *vm;
2928 char *vaddr, *buf_start = buf;
2929 unsigned long buflen = count;
2932 /* Don't allow overflow */
2933 if ((unsigned long) addr + count < count)
2934 count = -(unsigned long) addr;
2936 spin_lock(&vmap_area_lock);
2937 list_for_each_entry(va, &vmap_area_list, list) {
2945 vaddr = (char *) vm->addr;
2946 if (addr >= vaddr + get_vm_area_size(vm))
2948 while (addr < vaddr) {
2956 n = vaddr + get_vm_area_size(vm) - addr;
2959 if (!(vm->flags & VM_IOREMAP))
2960 aligned_vread(buf, addr, n);
2961 else /* IOREMAP area is treated as memory hole */
2968 spin_unlock(&vmap_area_lock);
2970 if (buf == buf_start)
2972 /* zero-fill memory holes */
2973 if (buf != buf_start + buflen)
2974 memset(buf, 0, buflen - (buf - buf_start));
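/*
 * A minimal usage sketch (hypothetical debug helper, not part of this
 * file): copy a window of vmalloc space into a kernel buffer without
 * knowing whether the whole range is mapped. Holes and IOREMAP areas
 * come back zero-filled; a return value of 0 means nothing in the range
 * intersected a live vmalloc area.
 */
static long example_peek_vmalloc(char *kbuf, void *vaddr, unsigned long len)
{
	long copied = vread(kbuf, (char *)vaddr, len);

	if (!copied)
		pr_debug("%p..%p does not overlap any vmalloc area\n",
			 vaddr, vaddr + len);
	return copied;
}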
2980 * vwrite() - write vmalloc area in a safe way.
2981 * @buf: buffer for source data
2982 * @addr: vm address.
2983 * @count: number of bytes to be written.
2985 * This function checks that addr is a valid vmalloc'ed area, and
2986 * copies data from the buffer to the given address. If the specified range of
2987 * [addr...addr+count) includes some valid address, data is copied from
2988 * the proper area of @buf. Memory holes are skipped; nothing is copied into them.
2989 * IOREMAP areas are treated as memory holes and no copy is done.
2991 * If [addr...addr+count) doesn't include any intersection with a live
2992 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2994 * Note: In usual operations, vwrite() is never necessary because the caller
2995 * should know that the vmalloc() area is valid and can use memcpy().
2996 * This is for routines which have to access the vmalloc area without
2997 * any prior information, such as /dev/kmem.
2999 * Return: number of bytes for which addr and buf should be
3000 * increased (same number as @count) or %0 if [addr...addr+count)
3001 * doesn't include any intersection with valid vmalloc area
3003 long vwrite(char *buf, char *addr, unsigned long count)
3005 struct vmap_area *va;
3006 struct vm_struct *vm;
3008 unsigned long n, buflen;
3011 /* Don't allow overflow */
3012 if ((unsigned long) addr + count < count)
3013 count = -(unsigned long) addr;
3016 spin_lock(&vmap_area_lock);
3017 list_for_each_entry(va, &vmap_area_list, list) {
3025 vaddr = (char *) vm->addr;
3026 if (addr >= vaddr + get_vm_area_size(vm))
3028 while (addr < vaddr) {
3035 n = vaddr + get_vm_area_size(vm) - addr;
3038 if (!(vm->flags & VM_IOREMAP)) {
3039 aligned_vwrite(buf, addr, n);
3047 spin_unlock(&vmap_area_lock);
3054 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3055 * @vma: vma to cover
3056 * @uaddr: target user address to start at
3057 * @kaddr: virtual address of vmalloc kernel memory
3058 * @pgoff: offset from @kaddr to start at
3059 * @size: size of map area
3061 * Returns: 0 for success, -Exxx on failure
3063 * This function checks that @kaddr is a valid vmalloc'ed area,
3064 * and that it is big enough to cover the range starting at
3065 * @uaddr in @vma. Will return failure if this criterion isn't met.
3068 * Similar to remap_pfn_range() (see mm/memory.c)
3070 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3071 void *kaddr, unsigned long pgoff,
3074 struct vm_struct *area;
3076 unsigned long end_index;
3078 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3081 size = PAGE_ALIGN(size);
3083 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3086 area = find_vm_area(kaddr);
3090 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3093 if (check_add_overflow(size, off, &end_index) ||
3094 end_index > get_vm_area_size(area))
3099 struct page *page = vmalloc_to_page(kaddr);
3102 ret = vm_insert_page(vma, uaddr, page);
3111 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3115 EXPORT_SYMBOL(remap_vmalloc_range_partial);
3118 * remap_vmalloc_range - map vmalloc pages to userspace
3119 * @vma: vma to cover (map full range of vma)
3120 * @addr: vmalloc memory
3121 * @pgoff: number of pages into addr before first page to map
3123 * Returns: 0 for success, -Exxx on failure
3125 * This function checks that addr is a valid vmalloc'ed area, and
3126 * that it is big enough to cover the vma. Will return failure if
3127 * that criterion isn't met.
3129 * Similar to remap_pfn_range() (see mm/memory.c)
3131 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3132 unsigned long pgoff)
3134 return remap_vmalloc_range_partial(vma, vma->vm_start,
3136 vma->vm_end - vma->vm_start);
3138 EXPORT_SYMBOL(remap_vmalloc_range);
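/*
 * A minimal usage sketch (hypothetical driver bits, not part of this
 * file; assumes <linux/fs.h> for struct file): a buffer obtained from
 * vmalloc_user() carries VM_USERMAP, which is what the remap path checks
 * for, so an mmap handler can expose the whole buffer to userspace. The
 * buffer must be at least as large as the VMA being mapped.
 */
static void *example_shared_buf;	/* set up earlier via vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_shared_buf, 0);
}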
3141 * Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose not to provide them.
3144 * The purpose of these functions is to make sure the vmalloc area
3145 * mappings are identical in all page-tables in the system.
3147 void __weak vmalloc_sync_mappings(void)
3151 void __weak vmalloc_sync_unmappings(void)
3155 static int f(pte_t *pte, unsigned long addr, void *data)
3167 * alloc_vm_area - allocate a range of kernel address space
3168 * @size: size of the area
3169 * @ptes: returns the PTEs for the address space
3171 * Returns: NULL on failure, vm_struct on success
3173 * This function reserves a range of kernel address space, and
3174 * allocates pagetables to map that range. No actual mappings
3177 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3178 * allocated for the VM area are returned.
3180 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3182 struct vm_struct *area;
3184 area = get_vm_area_caller(size, VM_IOREMAP,
3185 __builtin_return_address(0));
3190 * This ensures that page tables are constructed for this region
3191 * of kernel virtual address space and mapped into init_mm.
3193 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3194 size, f, ptes ? &ptes : NULL)) {
3201 EXPORT_SYMBOL_GPL(alloc_vm_area);
3203 void free_vm_area(struct vm_struct *area)
3205 struct vm_struct *ret;
3206 ret = remove_vm_area(area->addr);
3207 BUG_ON(ret != area);
3210 EXPORT_SYMBOL_GPL(free_vm_area);
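/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * reserve a page-table-backed window of kernel address space without
 * mapping anything into it yet, and give it back with free_vm_area().
 * Passing a non-NULL @ptes array instead would return the PTE pointers
 * so the caller can populate the mappings itself later.
 */
static struct vm_struct *example_reserve_window(size_t size)
{
	return alloc_vm_area(size, NULL);
}

static void example_release_window(struct vm_struct *area)
{
	free_vm_area(area);
}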
3213 static struct vmap_area *node_to_va(struct rb_node *n)
3215 return rb_entry_safe(n, struct vmap_area, rb_node);
3219 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3220 * @addr: target address
3222 * Returns: the vmap_area if it is found. If there is no such area,
3223 * the first highest (in reverse order) vmap_area is returned,
3224 * i.e. va->va_start < addr && va->va_end < addr, or NULL
3225 * if there are no areas before @addr.
3227 static struct vmap_area *
3228 pvm_find_va_enclose_addr(unsigned long addr)
3230 struct vmap_area *va, *tmp;
3233 n = free_vmap_area_root.rb_node;
3237 tmp = rb_entry(n, struct vmap_area, rb_node);
3238 if (tmp->va_start <= addr) {
3240 if (tmp->va_end >= addr)
3253 * pvm_determine_end_from_reverse - find the highest aligned address
3254 * of a free block below VMALLOC_END
3256 * in - the VA we start the search from (reverse order);
3257 * out - the VA with the highest aligned end address.
3259 * Returns: determined end address within vmap_area
3261 static unsigned long
3262 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3264 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3268 list_for_each_entry_from_reverse((*va),
3269 &free_vmap_area_list, list) {
3270 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3271 if ((*va)->va_start < addr)
3280 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3281 * @offsets: array containing offset of each area
3282 * @sizes: array containing size of each area
3283 * @nr_vms: the number of areas to allocate
3284 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3286 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3287 * vm_structs on success, %NULL on failure
3289 * Percpu allocator wants to use congruent vm areas so that it can
3290 * maintain the offsets among percpu areas. This function allocates
3291 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
3292 * be scattered pretty far, distance between two areas easily going up
3293 * to gigabytes. To avoid interacting with regular vmallocs, these
3294 * areas are allocated from the top.
3296 * Despite its complicated look, this allocator is rather simple. It
3297 * does everything top-down and scans free blocks from the end looking
3298 * for a matching base. While scanning, if any of the areas do not fit,
3299 * the base address is pulled down to fit the area. Scanning is repeated until
3300 * all the areas fit and then all necessary data structures are inserted
3301 * and the result is returned.
3303 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3304 const size_t *sizes, int nr_vms,
3307 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3308 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3309 struct vmap_area **vas, *va;
3310 struct vm_struct **vms;
3311 int area, area2, last_area, term_area;
3312 unsigned long base, start, size, end, last_end, orig_start, orig_end;
3313 bool purged = false;
3316 /* verify parameters and allocate data structures */
3317 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3318 for (last_area = 0, area = 0; area < nr_vms; area++) {
3319 start = offsets[area];
3320 end = start + sizes[area];
3322 /* is everything aligned properly? */
3323 BUG_ON(!IS_ALIGNED(offsets[area], align));
3324 BUG_ON(!IS_ALIGNED(sizes[area], align));
3326 /* detect the area with the highest address */
3327 if (start > offsets[last_area])
3330 for (area2 = area + 1; area2 < nr_vms; area2++) {
3331 unsigned long start2 = offsets[area2];
3332 unsigned long end2 = start2 + sizes[area2];
3334 BUG_ON(start2 < end && start < end2);
3337 last_end = offsets[last_area] + sizes[last_area];
3339 if (vmalloc_end - vmalloc_start < last_end) {
3344 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3345 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3349 for (area = 0; area < nr_vms; area++) {
3350 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3351 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3352 if (!vas[area] || !vms[area])
3356 spin_lock(&free_vmap_area_lock);
3358 /* start scanning - we scan from the top, begin with the last area */
3359 area = term_area = last_area;
3360 start = offsets[area];
3361 end = start + sizes[area];
3363 va = pvm_find_va_enclose_addr(vmalloc_end);
3364 base = pvm_determine_end_from_reverse(&va, align) - end;
3368 * base might have underflowed, add last_end before comparing.
3371 if (base + last_end < vmalloc_start + last_end)
3375 * A fitting base has not been found.
3381 * If required width exceeds current VA block, move
3382 * base downwards and then recheck.
3384 if (base + end > va->va_end) {
3385 base = pvm_determine_end_from_reverse(&va, align) - end;
3391 * If this VA does not fit, move base downwards and recheck.
3393 if (base + start < va->va_start) {
3394 va = node_to_va(rb_prev(&va->rb_node));
3395 base = pvm_determine_end_from_reverse(&va, align) - end;
3401 * This area fits, move on to the previous one. If
3402 * the previous one is the terminal one, we're done.
3404 area = (area + nr_vms - 1) % nr_vms;
3405 if (area == term_area)
3408 start = offsets[area];
3409 end = start + sizes[area];
3410 va = pvm_find_va_enclose_addr(base + end);
3413 /* we've found a fitting base, insert all va's */
3414 for (area = 0; area < nr_vms; area++) {
3417 start = base + offsets[area];
3420 va = pvm_find_va_enclose_addr(start);
3421 if (WARN_ON_ONCE(va == NULL))
3422 /* It is a BUG(), but trigger recovery instead. */
3425 type = classify_va_fit_type(va, start, size);
3426 if (WARN_ON_ONCE(type == NOTHING_FIT))
3427 /* It is a BUG(), but trigger recovery instead. */
3430 ret = adjust_va_to_fit_type(va, start, size, type);
3434 /* Allocated area. */
3436 va->va_start = start;
3437 va->va_end = start + size;
3440 spin_unlock(&free_vmap_area_lock);
3442 /* populate the kasan shadow space */
3443 for (area = 0; area < nr_vms; area++) {
3444 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3445 goto err_free_shadow;
3447 kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3451 /* insert all vm's */
3452 spin_lock(&vmap_area_lock);
3453 for (area = 0; area < nr_vms; area++) {
3454 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3456 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3459 spin_unlock(&vmap_area_lock);
3466 * Remove previously allocated areas. There is no
3467 * need to remove these areas from the busy tree,
3468 * because they are inserted only on the final step
3469 * and only when pcpu_get_vm_areas() succeeds.
3472 orig_start = vas[area]->va_start;
3473 orig_end = vas[area]->va_end;
3474 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3475 &free_vmap_area_list);
3476 kasan_release_vmalloc(orig_start, orig_end,
3477 va->va_start, va->va_end);
3482 spin_unlock(&free_vmap_area_lock);
3484 purge_vmap_area_lazy();
3487 /* Before "retry", check if we recovered. */
3488 for (area = 0; area < nr_vms; area++) {
3492 vas[area] = kmem_cache_zalloc(
3493 vmap_area_cachep, GFP_KERNEL);
3502 for (area = 0; area < nr_vms; area++) {
3504 kmem_cache_free(vmap_area_cachep, vas[area]);
3514 spin_lock(&free_vmap_area_lock);
3516 * We release all the vmalloc shadows, even the ones for regions that
3517 * hadn't been successfully added. This relies on kasan_release_vmalloc
3518 * being able to tolerate this case.
3520 for (area = 0; area < nr_vms; area++) {
3521 orig_start = vas[area]->va_start;
3522 orig_end = vas[area]->va_end;
3523 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3524 &free_vmap_area_list);
3525 kasan_release_vmalloc(orig_start, orig_end,
3526 va->va_start, va->va_end);
3530 spin_unlock(&free_vmap_area_lock);
3537 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3538 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3539 * @nr_vms: the number of allocated areas
3541 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3543 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3547 for (i = 0; i < nr_vms; i++)
3548 free_vm_area(vms[i]);
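/*
 * A minimal usage sketch (hypothetical sizes, not part of this file):
 * request two congruent areas, 16 pages apart, much as the percpu
 * first-chunk setup does. On success the returned areas share a single
 * base address plus the requested offsets, and everything (including the
 * returned array) is released by pcpu_free_vm_areas().
 */
static int example_two_congruent_areas(void)
{
	const unsigned long offsets[] = { 0, 16 * PAGE_SIZE };
	const size_t sizes[] = { 4 * PAGE_SIZE, 4 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (!vms)
		return -ENOMEM;

	/* vms[0]->addr and vms[1]->addr now differ by exactly 16 pages */
	pcpu_free_vm_areas(vms, 2);
	return 0;
}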
3551 #endif /* CONFIG_SMP */
3553 #ifdef CONFIG_PROC_FS
3554 static void *s_start(struct seq_file *m, loff_t *pos)
3555 __acquires(&vmap_purge_lock)
3556 __acquires(&vmap_area_lock)
3558 mutex_lock(&vmap_purge_lock);
3559 spin_lock(&vmap_area_lock);
3561 return seq_list_start(&vmap_area_list, *pos);
3564 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3566 return seq_list_next(p, &vmap_area_list, pos);
3569 static void s_stop(struct seq_file *m, void *p)
3570 __releases(&vmap_purge_lock)
3571 __releases(&vmap_area_lock)
3573 mutex_unlock(&vmap_purge_lock);
3574 spin_unlock(&vmap_area_lock);
3577 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3579 if (IS_ENABLED(CONFIG_NUMA)) {
3580 unsigned int nr, *counters = m->private;
3585 if (v->flags & VM_UNINITIALIZED)
3587 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3590 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3592 for (nr = 0; nr < v->nr_pages; nr++)
3593 counters[page_to_nid(v->pages[nr])]++;
3595 for_each_node_state(nr, N_HIGH_MEMORY)
3597 seq_printf(m, " N%u=%u", nr, counters[nr]);
3601 static void show_purge_info(struct seq_file *m)
3603 struct llist_node *head;
3604 struct vmap_area *va;
3606 head = READ_ONCE(vmap_purge_list.first);
3610 llist_for_each_entry(va, head, purge_list) {
3611 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3612 (void *)va->va_start, (void *)va->va_end,
3613 va->va_end - va->va_start);
3617 static int s_show(struct seq_file *m, void *p)
3619 struct vmap_area *va;
3620 struct vm_struct *v;
3622 va = list_entry(p, struct vmap_area, list);
3625 * s_show can race with remove_vm_area(): !vm means the vmap area
3626 * is being torn down or it is a vm_map_ram allocation.
3629 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3630 (void *)va->va_start, (void *)va->va_end,
3631 va->va_end - va->va_start);
3638 seq_printf(m, "0x%pK-0x%pK %7ld",
3639 v->addr, v->addr + v->size, v->size);
3642 seq_printf(m, " %pS", v->caller);
3645 seq_printf(m, " pages=%d", v->nr_pages);
3648 seq_printf(m, " phys=%pa", &v->phys_addr);
3650 if (v->flags & VM_IOREMAP)
3651 seq_puts(m, " ioremap");
3653 if (v->flags & VM_ALLOC)
3654 seq_puts(m, " vmalloc");
3656 if (v->flags & VM_MAP)
3657 seq_puts(m, " vmap");
3659 if (v->flags & VM_USERMAP)
3660 seq_puts(m, " user");
3662 if (v->flags & VM_DMA_COHERENT)
3663 seq_puts(m, " dma-coherent");
3665 if (is_vmalloc_addr(v->pages))
3666 seq_puts(m, " vpages");
3668 show_numa_info(m, v);
3672 * As a final step, dump "unpurged" areas. Note
3673 * that the entire "/proc/vmallocinfo" output will not
3674 * be address sorted, because the purge list is not sorted.
3677 if (list_is_last(&va->list, &vmap_area_list))
3683 static const struct seq_operations vmalloc_op = {
3690 static int __init proc_vmalloc_init(void)
3692 if (IS_ENABLED(CONFIG_NUMA))
3693 proc_create_seq_private("vmallocinfo", 0400, NULL,
3695 nr_node_ids * sizeof(unsigned int), NULL);
3697 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3700 module_init(proc_vmalloc_init);