early_iounmap(adr, PAGE_SIZE);
}
-static void __meminit
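+/*
+ * Returns the address just past the last page actually mapped, so the
+ * caller can tell how far the direct mapping really extends.
+ */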
+static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
{
unsigned pages = 0;
+ unsigned long last_map_addr = end;
int i;
+
pte_t *pte = pte_page + pte_index(addr);
for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
printk(" pte=%p addr=%lx pte=%016lx\n",
pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
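+ /* advance the high-water mark past the page just mapped */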
+ last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
pages++;
}
update_page_count(PG_LEVEL_4K, pages);
+
+ return last_map_addr;
}
-static void __meminit
+static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
{
pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
- phys_pte_init(pte, address, end);
+ return phys_pte_init(pte, address, end);
}
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
unsigned long page_size_mask)
{
unsigned long pages = 0;
+ unsigned long last_map_addr = end;
int i = pmd_index(address);
if (pmd_val(*pmd)) {
if (!pmd_large(*pmd))
- phys_pte_update(pmd, address, end);
+ last_map_addr = phys_pte_update(pmd, address,
+ end);
continue;
}
if (page_size_mask & (1<<PG_LEVEL_2M)) {
pages++;
set_pte((pte_t *)pmd,
pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+ last_map_addr = (address & PMD_MASK) + PMD_SIZE;
continue;
}
pte = alloc_low_page(&pte_phys);
- phys_pte_init(pte, address, end);
+ last_map_addr = phys_pte_init(pte, address, end);
unmap_low_page(pte);
pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
}
update_page_count(PG_LEVEL_2M, pages);
- return address;
+ return last_map_addr;
}
static unsigned long __meminit
return last_map_addr;
}
+
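+/*
+ * A physical address range together with the largest page size that may
+ * be used to map it.
+ */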
+struct map_range {
+ unsigned long start;
+ unsigned long end;
+ unsigned page_size_mask;
+};
+
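+/* worst case: a 4k head, a 2M run, a 1G run, a 2M tail and a 4k tail */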
+#define NR_RANGE_MR 5
+
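+/*
+ * Record a non-empty pfn range and its allowed page sizes in mr[];
+ * returns the updated number of ranges.
+ */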
+static int save_mr(struct map_range *mr, int nr_range,
+ unsigned long start_pfn, unsigned long end_pfn,
+ unsigned long page_size_mask)
+{
+ if (start_pfn < end_pfn) {
+ if (nr_range >= NR_RANGE_MR)
+ panic("run out of range for init_memory_mapping\n");
+ mr[nr_range].start = start_pfn<<PAGE_SHIFT;
+ mr[nr_range].end = end_pfn<<PAGE_SHIFT;
+ mr[nr_range].page_size_mask = page_size_mask;
+ nr_range++;
+ }
+
+ return nr_range;
+}
+
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
unsigned long __init_refok init_memory_mapping(unsigned long start,
unsigned long end)
{
- unsigned long last_map_addr = end;
+ unsigned long last_map_addr = 0;
unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
+ struct map_range mr[NR_RANGE_MR];
+ int nr_range, i;
+
printk(KERN_INFO "init_memory_mapping\n");
/*
* memory mapped. Unfortunately this is done currently before the
* nodes are discovered.
*/
- if (!after_bootmem) {
+ if (!after_bootmem)
init_gbpages();
- find_early_table_space(end);
- }
if (direct_gbpages)
page_size_mask |= 1 << PG_LEVEL_1G;
if (cpu_has_pse)
page_size_mask |= 1 << PG_LEVEL_2M;
- /* head if not big page aligment ?*/
+ memset(mr, 0, sizeof(mr));
+ nr_range = 0;
+
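+ /*
+  * Split [start, end) on 2M and 1G boundaries so that each piece can
+  * be mapped with the largest page size it allows.
+  */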
+ /* head if not big page aligned? */
start_pfn = start >> PAGE_SHIFT;
end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
<< (PMD_SHIFT - PAGE_SHIFT);
- if (start_pfn < end_pfn)
- last_map_addr = kernel_physical_mapping_init(
- start_pfn<<PAGE_SHIFT,
- end_pfn<<PAGE_SHIFT, 0);
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
/* big page (2M) range*/
start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
<< (PMD_SHIFT - PAGE_SHIFT);
end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
<< (PUD_SHIFT - PAGE_SHIFT);
if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
- if (start_pfn < end_pfn)
- last_map_addr = kernel_physical_mapping_init(
- start_pfn<<PAGE_SHIFT,
- end_pfn<<PAGE_SHIFT,
- page_size_mask & (1<<PG_LEVEL_2M));
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+ page_size_mask & (1<<PG_LEVEL_2M));
/* big page (1G) range */
start_pfn = end_pfn;
end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
- if (start_pfn < end_pfn)
- last_map_addr = kernel_physical_mapping_init(
- start_pfn<<PAGE_SHIFT,
- end_pfn<<PAGE_SHIFT,
- page_size_mask & ((1<<PG_LEVEL_2M)
- | (1<<PG_LEVEL_1G)));
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+ page_size_mask &
+ ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
/* tail is not big page (1G) alignment */
start_pfn = end_pfn;
end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
- if (start_pfn < end_pfn)
- last_map_addr = kernel_physical_mapping_init(
- start_pfn<<PAGE_SHIFT,
- end_pfn<<PAGE_SHIFT,
- page_size_mask & (1<<PG_LEVEL_2M));
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+ page_size_mask & (1<<PG_LEVEL_2M));
+
/* tail is not big page (2M) alignment */
start_pfn = end_pfn;
end_pfn = end>>PAGE_SHIFT;
- if (start_pfn < end_pfn)
+ nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+
+ for (i = 0; i < nr_range; i++)
+ printk(KERN_DEBUG " %010lx - %010lx page %s\n",
+ mr[i].start, mr[i].end,
+ (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
+ (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+
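+ /* find space for the kernel page tables before creating any mappings */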
+ if (!after_bootmem)
+ find_early_table_space(end);
+
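+ /* now set up the direct mapping for each computed range */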
+ for (i = 0; i < nr_range; i++)
last_map_addr = kernel_physical_mapping_init(
- start_pfn<<PAGE_SHIFT,
- end_pfn<<PAGE_SHIFT, 0);
+ mr[i].start, mr[i].end,
+ mr[i].page_size_mask);
if (!after_bootmem)
mmu_cr4_features = read_cr4();