typedef struct page *pgtable_t;
/*
- * For 3-level pagetables we defines these ourselves, for 2-level the
- * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
- */
-#ifdef CONFIG_64BIT
-
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-
-#endif
-
-/*
* Right now we don't support 4-level pagetables, so all pud-related
* definitions come from <asm-generic/pgtable-nopud.h>.
*/
extern void pmd_init(unsigned long page, unsigned long pagetable);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
tlb_remove_page((tlb), pte); \
} while (0)
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
#include <asm/cachectl.h>
#include <asm/fixmap.h>
+#ifdef CONFIG_PAGE_SIZE_64KB
+#include <asm-generic/pgtable-nopmd.h>
+#else
#include <asm-generic/pgtable-nopud.h>
+#endif
/*
* Each address space has 2 4K pages as its page directory, giving 1024
* fault address - VMALLOC_START.
*/
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#ifdef __PAGETABLE_PMD_FOLDED
+#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
+#else
+
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
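As a quick sanity check on the folded layout, the arithmetic for the CONFIG_PAGE_SIZE_64KB case works out as follows (a sketch only, not part of the patch, assuming the values the 64K hunk below selects: PAGE_SHIFT = 16, PTE_ORDER = 0, PGD_ORDER = 0):

	/*
	 * Illustrative arithmetic, assuming 64K pages (PAGE_SHIFT = 16),
	 * PTE_ORDER = 0, PGD_ORDER = 0 and 8-byte table entries:
	 *
	 *   PGDIR_SHIFT  = 16 + 16 + 0 - 3 = 29   -> each pgd entry maps 512 MB
	 *   PTRS_PER_PGD = (65536 << 0) / 8 = 8192
	 *   PTRS_PER_PTE = (65536 << 0) / 8 = 8192
	 *
	 * Two levels of 8192 entries therefore cover 8192 * 512 MB = 4 TB of
	 * virtual space, which is why the pmd level can be folded away here.
	 */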
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
+#define PMD_ORDER aieeee_attempt_to_allocate_pmd
#define PTE_ORDER 0
#endif
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
+#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#if PGDIR_SIZE >= TASK_SIZE
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
+
+#ifndef __PAGETABLE_PMD_FOLDED
+/*
+ * For 3-level pagetables we define these ourselves, for 2-level the
+ * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
+#endif
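When the pmd is folded, the equivalent pmd_t plumbing comes from the generic header instead. Roughly, <asm-generic/pgtable-nopmd.h> supplies definitions along these lines (paraphrased for orientation only, not part of this patch):

	/* Folded case, as provided by <asm-generic/pgtable-nopmd.h> (paraphrased): */
	typedef struct { pud_t pud; } pmd_t;
	#define pmd_val(x)	(pud_val((x).pud))
	#define __pmd(x)	((pmd_t) { __pud(x) })

so generic code that manipulates pmd entries keeps compiling unchanged in the 2-level configuration.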
/*
* Empty pgd/pmd entries point to the invalid_pte_table.
{
pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
+#ifndef __PAGETABLE_PMD_FOLDED
/*
* Empty pud entries point to the invalid_pmd_table.
{
pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
+#endif
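The point of having empty entries carry the address of invalid_pte_table / invalid_pmd_table rather than zero is that the refill path can always descend one level without a NULL check; detecting an empty entry is just a comparison against the invalid table. A minimal sketch of that test, with hypothetical helper names (the in-tree pmd_none()/pud_none() tests work the same way, but these lines are illustrations, not quotes):

	/* Sketch only: an empty pmd still points at invalid_pte_table. */
	static inline int pmd_is_empty(pmd_t pmd)
	{
		return pmd_val(pmd) == (unsigned long) invalid_pte_table;
	}

	#ifndef __PAGETABLE_PMD_FOLDED
	/* Likewise, an empty pud points at invalid_pmd_table. */
	static inline int pud_is_empty(pud_t pud)
	{
		return pud_val(pud) == (unsigned long) invalid_pmd_table;
	}
	#endif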
#define pte_page(x) pfn_to_page(pte_pfn(x))
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return pud_val(pud);
{
return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
+#endif
/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
*/
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
* will officially be retired.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
void pgd_init(unsigned long page)
{
unsigned long *p, *end;
+ unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+ entry = (unsigned long)invalid_pte_table;
+#else
+ entry = (unsigned long)invalid_pmd_table;
+#endif
p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
while (p < end) {
- p[0] = (unsigned long) invalid_pmd_table;
- p[1] = (unsigned long) invalid_pmd_table;
- p[2] = (unsigned long) invalid_pmd_table;
- p[3] = (unsigned long) invalid_pmd_table;
- p[4] = (unsigned long) invalid_pmd_table;
- p[5] = (unsigned long) invalid_pmd_table;
- p[6] = (unsigned long) invalid_pmd_table;
- p[7] = (unsigned long) invalid_pmd_table;
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p[5] = entry;
+ p[6] = entry;
+ p[7] = entry;
p += 8;
}
}
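Note that pgd_init() writes eight entries per loop iteration, so it quietly relies on PTRS_PER_PGD being a multiple of eight (and pmd_init() below relies on the same for PTRS_PER_PMD); that holds for every supported page size, since PAGE_SIZE / sizeof(pgd_t) is a large power of two. If one wanted the assumption spelled out, a compile-time check along these lines could sit at the top of pgd_init() (a sketch, not part of the patch, assuming BUILD_BUG_ON is available via linux/kernel.h):

	/* Hypothetical: document the unroll-by-8 assumption at build time. */
	BUILD_BUG_ON(PTRS_PER_PGD % 8 != 0);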
+#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
end = p + PTRS_PER_PMD;
while (p < end) {
- p[0] = (unsigned long)pagetable;
- p[1] = (unsigned long)pagetable;
- p[2] = (unsigned long)pagetable;
- p[3] = (unsigned long)pagetable;
- p[4] = (unsigned long)pagetable;
- p[5] = (unsigned long)pagetable;
- p[6] = (unsigned long)pagetable;
- p[7] = (unsigned long)pagetable;
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p[5] = pagetable;
+ p[6] = pagetable;
+ p[7] = pagetable;
p += 8;
}
}
+#endif
void __init pagetable_init(void)
{
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
-
+#endif
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
}
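For readers who do not speak uasm, the sequence emitted above amounts to roughly the following C, where ptr already holds the pgd base set up earlier in build_get_pmde64() and badvaddr stands for the faulting address read from C0_BADVADDR (a sketch only; pointer arithmetic on an unsigned long * scales by 8, which is what the explicit '<< 3' byte offsets in the assembly achieve):

	/* Rough C equivalent of the emitted refill walk (illustrative only). */
	ptr += (badvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);	/* pgd slot */
	#ifndef __PAGETABLE_PMD_FOLDED
	ptr = (unsigned long *) *ptr;				/* pmd table base */
	ptr += (badvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);	/* pmd slot */
	#endif

In the folded case the handler thus goes straight from the pgd slot to the pte table, saving one load and a few address-computation instructions on every TLB refill.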
/*