// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <image.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

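/*
 * Passing LMB_ALLOC_ANYWHERE as max_addr tells the allocators below to
 * skip the upper-bound check and take the highest suitable address.
 */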
#define LMB_ALLOC_ANYWHERE	0

void lmb_dump_all_force(struct lmb *lmb)
{
	unsigned long i;

	printf("lmb_dump_all:\n");
	printf("    memory.cnt = 0x%lx\n", lmb->memory.cnt);
	for (i = 0; i < lmb->memory.cnt; i++) {
		printf("    memory.reg[0x%lx].base = 0x%llx\n", i,
		       (unsigned long long)lmb->memory.region[i].base);
		printf("               .size = 0x%llx\n",
		       (unsigned long long)lmb->memory.region[i].size);
	}

	printf("\n    reserved.cnt = 0x%lx\n", lmb->reserved.cnt);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		printf("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		       (unsigned long long)lmb->reserved.region[i].base);
		printf("                 .size = 0x%llx\n",
		       (unsigned long long)lmb->reserved.region[i].size);
	}
}

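/* No-op unless DEBUG is defined; call lmb_dump_all_force() to always dump. */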
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

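/*
 * Return 1 if (base2, size2) starts right after (base1, size1) ends,
 * -1 if it ends right before (base1, size1) starts, and 0 otherwise.
 */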
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#else
	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
	lmb->memory.region = lmb->memory_regions;
	lmb->reserved.region = lmb->reserved_regions;
#endif
	lmb->memory.cnt = 0;
	lmb->reserved.cnt = 0;
}

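/* Apply the architecture, board and (optional) FDT memory reservations. */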
static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

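/*
 * Typical boot-time use (a sketch, assuming a populated gd->bd and a
 * valid device tree blob; names other than the lmb API are the caller's):
 *
 *	struct lmb lmb;
 *
 *	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *	addr = lmb_alloc(&lmb, size, align);
 */
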
/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}

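/*
 * Add (base, size) to @rgn and keep the table sorted by base address:
 * first try to merge the new range with an existing entry (an exact
 * duplicate is a no-op, an adjacent entry is extended, and a merge may
 * cascade once into the following neighbour); overlapping ranges are
 * rejected.  Only when no merge is possible is a new entry inserted.
 */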
/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			/* The new range ends where region i begins */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			/* The new range starts where region i ends */
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= rgn->max)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

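/*
 * Remove (base, size) from the reserved list.  Four cases: the range
 * matches a whole entry (drop it), matches the front or the back of an
 * entry (trim it), or sits in the middle (split the entry in two).
 */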
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end + 1, rgnend - end);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

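/*
 * Example (a sketch): pair lmb_reserve() with lmb_free() to protect a
 * buffer while it is in use; "buf" and "len" are the caller's.
 *
 *	if (lmb_reserve(&lmb, buf, len) < 0)
 *		return -1;
 *	...
 *	lmb_free(&lmb, buf, len);
 */
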
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

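/* Round addr down to a multiple of size; size must be a power of two. */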
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

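/*
 * Walk the memory regions from the top down.  In each region, start at
 * the highest aligned address that fits (capped at max_addr) and step
 * downwards past any reserved range that overlaps, until a free window
 * is found or the region is exhausted.  Returns 0 on failure.
 */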
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				/* overflowed: saturate so min() picks max_addr */
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

/* Return the number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

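/* Return 1 if addr falls inside any reserved region, 0 otherwise. */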
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}