* dma_get_required_mask(), which uses
* it, can be an inline function */
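+/* list of every node's bootmem data, kept sorted by node_boot_start */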
+static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
* If we have booted due to a crash, max_pfn will be a very low value. We need
return mapsize;
}
+/*
+ * Insert bdata into bdata_list, keeping the list sorted by ascending
+ * node_boot_start so callers can walk the nodes from the lowest
+ * physical address upwards.
+ */
+static void __init link_bootmem(bootmem_data_t *bdata)
+{
+ bootmem_data_t *ent;
+ if (list_empty(&bdata_list)) {
+ list_add(&bdata->list, &bdata_list);
+ return;
+ }
+	/* insert before the first entry that starts at a higher address */
+ list_for_each_entry(ent, &bdata_list, list) {
+ if (bdata->node_boot_start < ent->node_boot_start) {
+ list_add_tail(&bdata->list, &ent->list);
+ return;
+ }
+ }
+ list_add_tail(&bdata->list, &bdata_list);
+}
+
/*
* Called once to set up the allocator itself.
bootmem_data_t *bdata = pgdat->bdata;
unsigned long mapsize = ((end - start)+7)/8;
- pgdat->pgdat_next = pgdat_list;
- pgdat_list = pgdat;
-
mapsize = ALIGN(mapsize, sizeof(long));
bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
bdata->node_boot_start = (start << PAGE_SHIFT);
bdata->node_low_pfn = end;
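+	/* add this node's bootmem data to the address-ordered global list */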
+ link_bootmem(bdata);
/*
* Initially all pages are reserved - setup_arch() has to
void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
{
- pg_data_t *pgdat = pgdat_list;
+ bootmem_data_t *bdata;
void *ptr;
- for_each_pgdat(pgdat)
- if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
- align, goal, 0)))
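+	/* try each node's bootmem data, lowest physical address first */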
+ list_for_each_entry(bdata, &bdata_list, list)
+ if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0)))
return(ptr);
/*
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
{
- pg_data_t *pgdat = pgdat_list;
+ bootmem_data_t *bdata;
void *ptr;
- for_each_pgdat(pgdat)
- if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
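+	/* same walk, but only memory below LOW32LIMIT will satisfy it */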
+ list_for_each_entry(bdata, &bdata_list, list)
+ if ((ptr = __alloc_bootmem_core(bdata, size,
align, goal, LOW32LIMIT)))
return(ptr);