/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
6 static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
7 unsigned long addr, unsigned long end)
10 for (; addr < end; addr += PMD_SIZE) {
11 pmd_t *pmd = pmd_page + pmd_index(addr);
13 if (!pmd_present(*pmd))
14 set_pmd(pmd, __pmd(addr | pmd_flag));
18 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
19 unsigned long addr, unsigned long end)
23 for (; addr < end; addr = next) {
24 pud_t *pud = pud_page + pud_index(addr);
27 next = (addr & PUD_MASK) + PUD_SIZE;
31 if (pud_present(*pud)) {
32 pmd = pmd_offset(pud, 0);
33 ident_pmd_init(info->pmd_flag, pmd, addr, next);
36 pmd = (pmd_t *)info->alloc_pgt_page(info->context);
39 ident_pmd_init(info->pmd_flag, pmd, addr, next);
40 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
46 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
47 unsigned long addr, unsigned long end)
51 int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
53 for (; addr < end; addr = next) {
54 pgd_t *pgd = pgd_page + pgd_index(addr) + off;
57 next = (addr & PGDIR_MASK) + PGDIR_SIZE;
61 if (pgd_present(*pgd)) {
62 pud = pud_offset(pgd, 0);
63 result = ident_pud_init(info, pud, addr, next);
69 pud = (pud_t *)info->alloc_pgt_page(info->context);
72 result = ident_pud_init(info, pud, addr, next);
75 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));