ARM: pgtable: collect up identity mapping functions
authorRussell King <rmk+kernel@arm.linux.org.uk>
Sun, 21 Nov 2010 11:41:57 +0000 (11:41 +0000)
committerRussell King <rmk+kernel@arm.linux.org.uk>
Wed, 22 Dec 2010 11:05:33 +0000 (11:05 +0000)
We have two places where we create identity mappings - one when we bring
secondary CPUs online, and one where we set up some mappings for soft-
reboot.  Combine these two into a single implementation.  Also collect
the identity mapping deletion function.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/pgtable.h
arch/arm/kernel/smp.c
arch/arm/mm/Makefile
arch/arm/mm/idmap.c [new file with mode: 0644]
arch/arm/mm/mmu.c

index e582214..1e31af2 100644 (file)
@@ -474,6 +474,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pgtable_cache_init() do { } while (0)
 
+void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
+void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
index 4631380..73cef40 100644 (file)
@@ -68,40 +68,6 @@ enum ipi_msg_type {
        IPI_CPU_STOP,
 };
 
-static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
-       unsigned long end)
-{
-       unsigned long addr, prot;
-       pmd_t *pmd;
-
-       prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
-       if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-               prot |= PMD_BIT4;
-
-       for (addr = start & PGDIR_MASK; addr < end;) {
-               pmd = pmd_offset(pgd + pgd_index(addr), addr);
-               pmd[0] = __pmd(addr | prot);
-               addr += SECTION_SIZE;
-               pmd[1] = __pmd(addr | prot);
-               addr += SECTION_SIZE;
-               flush_pmd_entry(pmd);
-       }
-}
-
-static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
-       unsigned long end)
-{
-       unsigned long addr;
-       pmd_t *pmd;
-
-       for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
-               pmd = pmd_offset(pgd + pgd_index(addr), addr);
-               pmd[0] = __pmd(0);
-               pmd[1] = __pmd(0);
-               clean_pmd_entry(pmd);
-       }
-}
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
index d63b6c4..00d74a0 100644 (file)
@@ -5,8 +5,8 @@
 obj-y                          := dma-mapping.o extable.o fault.o init.o \
                                   iomap.o
 
-obj-$(CONFIG_MMU)              += fault-armv.o flush.o ioremap.o mmap.o \
-                                  pgd.o mmu.o vmregion.o
+obj-$(CONFIG_MMU)              += fault-armv.o flush.o idmap.o ioremap.o \
+                                  mmap.o pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y                          += nommu.o
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
new file mode 100644 (file)
index 0000000..034124d
--- /dev/null
@@ -0,0 +1,51 @@
+#include <linux/kernel.h>
+
+#include <asm/cputype.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+       unsigned long prot;
+
+       prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+       if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
+               prot |= PMD_BIT4;
+
+       for (addr &= PGDIR_MASK; addr < end;) {
+               pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr);
+               pmd[0] = __pmd(addr | prot);
+               addr += SECTION_SIZE;
+               pmd[1] = __pmd(addr | prot);
+               addr += SECTION_SIZE;
+               flush_pmd_entry(pmd);
+       }
+}
+
+#ifdef CONFIG_SMP
+void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+       for (addr &= PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
+               pmd_t *pmd = pmd_offset(pgd + pgd_index(addr), addr);
+               pmd[0] = __pmd(0);
+               pmd[1] = __pmd(0);
+               clean_pmd_entry(pmd);
+       }
+}
+#endif
+
+/*
+ * In order to soft-boot, we need to insert a 1:1 mapping in place of
+ * the user-mode pages.  This will then ensure that we have predictable
+ * results when turning the mmu off.
+ */
+void setup_mm_for_reboot(char mode)
+{
+       /*
+        * We need access to user-mode page tables here. For kernel threads
+        * we don't have any user-mode mappings so we use the context that we
+        * "borrowed".
+        */
+       identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+       local_flush_tlb_all();
+}
index 94ee093..bd1a11e 100644 (file)
@@ -1045,38 +1045,3 @@ void __init paging_init(struct machine_desc *mdesc)
        empty_zero_page = virt_to_page(zero_page);
        __flush_dcache_page(NULL, empty_zero_page);
 }
-
-/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
- */
-void setup_mm_for_reboot(char mode)
-{
-       unsigned long base_pmdval;
-       pgd_t *pgd;
-       int i;
-
-       /*
-        * We need to access to user-mode page tables here. For kernel threads
-        * we don't have any user-mode mappings so we use the context that we
-        * "borrowed".
-        */
-       pgd = current->active_mm->pgd;
-
-       base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
-       if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-               base_pmdval |= PMD_BIT4;
-
-       for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
-               unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
-               pmd_t *pmd;
-
-               pmd = pmd_off(pgd, i << PGDIR_SHIFT);
-               pmd[0] = __pmd(pmdval);
-               pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
-               flush_pmd_entry(pmd);
-       }
-
-       local_flush_tlb_all();
-}