 			node_clear(node, memory_less_mask);
 			mem_data[node].min_pfn = ~0UL;
 		}
+
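+	/* Record each usable memory range from the EFI memmap as an active range. */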
+	efi_memmap_walk(register_active_ranges, NULL);
+
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
 {
 	unsigned long end = start + len;
-	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 #include <asm/a.out.h>
 #include <asm/dma.h>
 	return 0;
 }
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
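+	/* Fall back to node 0 if paddr_to_nid() finds no node for this range. */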
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
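+	/* Trim the range so it does not overlap the crash kernel region. */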
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
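+	/* Register whatever is left of the range, if anything. */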
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
 #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
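+/* Needed even when CONFIG_VIRTUAL_MEM_MAP is not set. */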
+extern int register_active_ranges(u64 start, u64 end, void *arg);
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
   extern unsigned long vmalloc_end;
   extern struct page *vmem_map;
   extern int find_largest_hole (u64 start, u64 end, void *arg);
-  extern int register_active_ranges (u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
   extern int vmemmap_find_next_valid_pfn(int, int);
 #else