// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
        return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                                             unsigned int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
                                               unsigned int align_order)
{
        return (cma->base_pfn & ((1UL << align_order) - 1))
                >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
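
/*
 * Worked example (illustrative, not part of the original code): for a CMA
 * area with base_pfn = 0x12345 and order_per_bit = 2, each bitmap bit covers
 * 4 pages.  A request with align_order = 4 (a 16-page alignment) gives:
 *
 *   mask   = (1UL << (4 - 2)) - 1              = 0x3  (bits per aligned chunk - 1)
 *   offset = (0x12345 & ((1UL << 4) - 1)) >> 2 = 0x1  (base misalignment, in bits)
 *   bits   = ALIGN(count, 1UL << 2) >> 2              (pages rounded up to whole bits)
 *
 * bitmap_find_next_zero_area_off() later combines mask and offset so that the
 * bitmap index it returns maps back to a PFN aligned to 1 << align_order.
 */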

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned long count)
{
        unsigned long bitmap_no, bitmap_count;
        unsigned long flags;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        spin_lock_irqsave(&cma->lock, flags);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
        unsigned long base_pfn = cma->base_pfn, pfn;
        struct zone *zone;

        cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
        if (!cma->bitmap)
                goto out_error;

        /*
         * alloc_contig_range() requires the pfn range specified to be in the
         * same zone. Simplify by forcing the entire CMA resv range to be in
         * the same zone.
         */
        WARN_ON_ONCE(!pfn_valid(base_pfn));
        zone = page_zone(pfn_to_page(base_pfn));
        for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
                WARN_ON_ONCE(!pfn_valid(pfn));
                if (page_zone(pfn_to_page(pfn)) != zone)
                        goto not_in_zone;
        }

        for (pfn = base_pfn; pfn < base_pfn + cma->count;
             pfn += pageblock_nr_pages)
                init_cma_reserved_pageblock(pfn_to_page(pfn));

        spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
        INIT_HLIST_HEAD(&cma->mem_head);
        spin_lock_init(&cma->mem_head_lock);
#endif

        return;

not_in_zone:
        bitmap_free(cma->bitmap);
out_error:
        /* Expose all pages to the buddy, they are useless for CMA. */
        if (!cma->reserve_pages_on_error) {
                for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
                        free_reserved_page(pfn_to_page(pfn));
        }
        totalcma_pages -= cma->count;
        cma->count = 0;
        pr_err("CMA area %s could not be activated\n", cma->name);
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++)
                cma_activate_area(&cma_areas[i]);

        return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
        cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 unsigned int order_per_bit,
                                 const char *name,
                                 struct cma **res_cma)
{
        struct cma *cma;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];

        if (name)
                snprintf(cma->name, CMA_MAX_NAME, "%s", name);
        else
                snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;
        totalcma_pages += (size / PAGE_SIZE);

        return 0;
}
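
/*
 * Illustrative sketch (not part of this file): a reserved-memory setup hook
 * might hand an already-reserved memblock region to CMA roughly like this.
 * The example_rmem_cma_setup() shape is an assumption modelled on
 * kernel/dma/contiguous.c, not something defined here.
 *
 *	static int __init example_rmem_cma_setup(struct reserved_mem *rmem)
 *	{
 *		struct cma *cma;
 *		int err;
 *
 *		err = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *					    rmem->name, &cma);
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */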

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area, optional, use 0 for any
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve the contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, const char *name, struct cma **res_cma,
                        int nid)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start;
        int ret = 0;

        /*
         * We can't use __pa(high_memory) directly, since high_memory
         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
         * complain. Find the boundary by adding one to the last valid
         * address.
         */
        highmem_start = __pa(high_memory - 1) + 1;
        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                __func__, &size, &base, &limit, &alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /* Sanitise input arguments. */
        alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
        if (fixed && base & (alignment - 1)) {
                ret = -EINVAL;
                pr_err("Region at %pa must be aligned to %pa bytes\n",
                        &base, &alignment);
                goto err;
        }
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        if (!base)
                fixed = false;

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * If allocating at a fixed base the request region must not cross the
         * low/high memory boundary.
         */
        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                        &base, &highmem_start);
                goto err;
        }

        /*
         * If the limit is unspecified or above the memblock end, its effective
         * value will be the memblock end. Set it explicitly to simplify further
         * checks.
         */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        if (base + size > limit) {
                ret = -EINVAL;
                pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
                        &size, &base, &limit);
                goto err;
        }

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = 0;

                /*
                 * If there is enough memory, try a bottom-up allocation first.
                 * It will place the new cma area close to the start of the node
                 * and guarantee that the compaction is moving pages out of the
                 * cma area and not into it.
                 * Avoid using first 4GB to not interfere with constrained zones
                 * like DMA/DMA32.
                 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
                if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
                        memblock_set_bottom_up(true);
                        addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
                                                        limit, nid, true);
                        memblock_set_bottom_up(false);
                }
#endif

                /*
                 * All pages in the reserved area must come from the same zone.
                 * If the requested region crosses the low/high memory boundary,
                 * try allocating from high memory first and fall back to low
                 * memory in case of failure.
                 */
                if (!addr && base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range_nid(size, alignment,
                                        highmem_start, limit, nid, true);
                        limit = highmem_start;
                }

                if (!addr) {
                        addr = memblock_alloc_range_nid(size, alignment, base,
                                        limit, nid, true);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                /*
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
                kmemleak_ignore_phys(addr);
                base = addr;
        }

        ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
        if (ret)
                goto free_mem;

        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        return 0;

free_mem:
        memblock_phys_free(base, size);
err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
        unsigned long next_zero_bit, next_set_bit, nr_zero;
        unsigned long start = 0;
        unsigned long nr_part, nr_total = 0;
        unsigned long nbits = cma_bitmap_maxno(cma);

        spin_lock_irq(&cma->lock);
        pr_info("number of available pages: ");
        for (;;) {
                next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
                if (next_zero_bit >= nbits)
                        break;
                next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
                nr_zero = next_set_bit - next_zero_bit;
                nr_part = nr_zero << cma->order_per_bit;
                pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
                        next_zero_bit);
                nr_total += nr_part;
                start = next_zero_bit + nr_zero;
        }
        pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
        spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
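
/*
 * Example output (illustrative only): with CONFIG_CMA_DEBUG, a failed
 * allocation on a fragmented 1024-page area might log something like
 *
 *   cma: number of available pages: 128@0+256@512=> 384 free of 1024 total pages
 *
 * i.e. each run of free pages is printed as "pages@bitmap-offset", with runs
 * joined by '+'.
 */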

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
                       unsigned int align, bool no_warn)
{
        unsigned long mask, offset;
        unsigned long pfn = -1;
        unsigned long start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        unsigned long i;
        struct page *page = NULL;
        int ret = -ENOMEM;

        if (!cma || !cma->count || !cma->bitmap)
                goto out;

        pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                goto out;

        trace_cma_alloc_start(cma->name, count, align);

        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        if (bitmap_count > bitmap_maxno)
                goto out;

        for (;;) {
                spin_lock_irq(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
                        spin_unlock_irq(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                spin_unlock_irq(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
                                GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));

                trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
                                           count, align);
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);

        /*
         * CMA can allocate multiple page blocks, which results in different
         * blocks being marked with different tags. Reset the tags to ignore
         * those page blocks.
         */
        if (page) {
                for (i = 0; i < count; i++)
                        page_kasan_tag_reset(page + i);
        }

        if (ret && !no_warn) {
                pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
                                   __func__, cma->name, count, ret);
                cma_debug_show_areas(cma);
        }

        pr_debug("%s(): returned %p\n", __func__, page);
out:
        if (page) {
                count_vm_event(CMA_ALLOC_SUCCESS);
                cma_sysfs_account_success_pages(cma, count);
        } else {
                count_vm_event(CMA_ALLOC_FAIL);
                if (cma)
                        cma_sysfs_account_fail_pages(cma, count);
        }

        return page;
}

bool cma_pages_valid(struct cma *cma, const struct page *pages,
                     unsigned long count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
                pr_debug("%s(page %p, count %lu)\n", __func__,
                         (void *)pages, count);
                return false;
        }

        return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
                 unsigned long count)
{
        unsigned long pfn;

        if (!cma_pages_valid(cma, pages, count))
                return false;

        pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

        pfn = page_to_pfn(pages);

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);
        trace_cma_release(cma->name, pfn, pages, count);

        return true;
}
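
/*
 * Illustrative sketch (not part of this file): how a hypothetical driver that
 * obtained a CMA area at boot (e.g. via cma_declare_contiguous_nid()) might
 * pair cma_alloc() with cma_release().  example_cma and nr_pages are assumed
 * names, not kernel symbols.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(example_cma, nr_pages, 0, false);
 *	if (!page)
 *		return -ENOMEM;
 *	... use the physically contiguous range starting at page_to_phys(page) ...
 *	cma_release(example_cma, page, nr_pages);
 */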

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = it(&cma_areas[i], data);

                if (ret)
                        return ret;
        }

        return 0;
}
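
/*
 * Illustrative sketch (not part of this file): a cma_for_each_area() callback
 * that sums the size of every registered area.  example_count_cma() and the
 * total pointer are hypothetical.
 *
 *	static int example_count_cma(struct cma *cma, void *data)
 *	{
 *		phys_addr_t *total = data;
 *
 *		*total += cma_get_size(cma);
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	// phys_addr_t total = 0;
 *	// cma_for_each_area(example_count_cma, &total);
 */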