/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
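/*
 * Explanatory note (not from the original file): the "+ 1" above leaves one
 * spare slot past INIT_MEMBLOCK_REGIONS in each static array.  memblock_init()
 * writes the MEMBLOCK_INACTIVE poison value into that last entry and
 * memblock_analyze() later WARNs if it changed, acting as a canary against
 * overruns of the static arrays before memblock_can_resize permits
 * memblock_double_array() to grow them.
 */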
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return MEMBLOCK_ERROR;

	base = memblock_align_down((end - size), align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
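/*
 * Illustrative note (not from the original source): memblock_find_region()
 * scans [start, end) from the top down.  For example, with end = 0x4000,
 * size = 0x1000 and align = 0x1000, the first candidate is 0x3000; if a
 * reserved region overlaps it, the next candidate is placed immediately
 * below that reserved region's base (re-aligned down), and so on until the
 * candidate would drop below 'start'.
 */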
static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}
/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}
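/*
 * Illustrative example (not part of the original file): a typical caller
 * pairs the lookup with an explicit reservation, since finding a range
 * does not by itself mark it as used.  The 'size' value below is made up.
 *
 *	u64 addr = memblock_find_in_range(0, 16UL << 20, size, PAGE_SIZE);
 *	if (addr != MEMBLOCK_ERROR)
 *		memblock_reserve(addr, size);
 */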
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}
/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
	}
}
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that we either use SLAB (when slab_is_available())
	 * or MEMBLOCK for allocations. That means this is unsafe to use when
	 * bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there's no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case, replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

 new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}
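/*
 * Illustrative note (not from the original file): because of the coalescing
 * above, adding adjacent ranges does not consume extra array slots.  For
 * example (addresses made up):
 *
 *	memblock_add(0x1000, 0x1000);	// region [0x1000..0x1fff]
 *	memblock_add(0x2000, 0x1000);	// merged -> one region [0x1000..0x2fff]
 *
 * Only ranges that neither overlap nor touch an existing region, or that
 * memblock_memory_can_coalesce() refuses to merge, take a new slot in the
 * sorted table.
 */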
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;

	}
	return 0;
}
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
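/*
 * Illustrative example (not from the original file): early boot code that
 * needs a physically contiguous scratch area before the page allocator is
 * up can do the following ('sz' is a made-up size):
 *
 *	phys_addr_t pa = memblock_alloc(sz, PAGE_SIZE);
 *	void *buf = __va(pa);
 *
 * memblock_alloc() goes through memblock_alloc_base(), which panics on
 * failure, so the caller does not need to check for a 0 return here; use
 * __memblock_alloc_base() directly when failure should be handled instead.
 */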
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by addresses
	 * and returns the nid. This is not very convenient for early_pfn_map[] users
	 * as the map isn't sorted yet, and it really wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the early
	 * map multiple times. Eventually we may want to use an ARCH config option
	 * to implement a completely different method for both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    !memblock_add_region(&memblock.reserved, ret, size))
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}
phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
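/*
 * Illustrative note (not from the original file): the binary search above
 * relies on memblock_add_region() keeping each region array sorted by base
 * address and free of overlaps, so "addr below regions[mid]" and "addr at
 * or above the end of regions[mid]" are the only two ways to miss.
 */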
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}
void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from this point on */
	memblock_can_resize = 1;
}
void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
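/*
 * Illustrative example (not from the original file): the expected call
 * sequence for an architecture's early setup code, using made-up range
 * values, is roughly:
 *
 *	memblock_init();			// hook up the static arrays
 *	memblock_add(base, size);		// once per RAM range discovered
 *	memblock_reserve(kstart, ksize);	// kernel image, initrd, ...
 *	memblock_analyze();			// compute memory_size, allow resize
 *	memblock_dump_all();			// only prints with memblock=debug
 *
 * after which memblock_alloc() and friends can hand out early memory.
 */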
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}
static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}
static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);
#endif /* CONFIG_DEBUG_FS */