--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ ... @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
+ unsigned int user_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ ... @@ static void __init build_mem_type_table(void)
}
}
+ cp = &cache_policies[cachepolicy];
+ user_pgprot = cp->pte;
+
/*
* ARMv6 and above have extended page tables.
*/
@@ ... @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ /*
+ * Mark the device area as "shared device"
+ */
mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
- }
- cp = &cache_policies[cachepolicy];
+ /*
+ * User pages need to be mapped with the ASID
+ * (iow, non-global)
+ */
+ user_pgprot |= L_PTE_ASID;
+ }
if (cpu_arch >= CPU_ARCH_ARMv5) {
mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
@@ ... @@ static void __init build_mem_type_table(void)
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
- v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
+ v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
protection_map[i] = __pgprot(v);
}
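The net effect of the mm-armv.c changes: user_pgprot starts from the selected cache policy's PTE bits and, on ARMv6, additionally carries L_PTE_ASID, which the protection_map loop then folds into all sixteen user protection values. A minimal standalone sketch of the loop body; PTE_BUFFERABLE = 1 << 2 and PTE_CACHEABLE = 1 << 3 are assumed here to mirror the kernel's values, and the sample map entry is hypothetical:

    #include <stdio.h>

    #define PTE_BUFFERABLE  (1 << 2)        /* assumed kernel value */
    #define PTE_CACHEABLE   (1 << 3)        /* assumed kernel value */
    #define L_PTE_ASID      (1 << 11)       /* non-global (use ASID, v6) */

    int main(void)
    {
            /* cache policy PTE bits, e.g. writeback: C and B both set */
            unsigned long cp_pte = PTE_CACHEABLE | PTE_BUFFERABLE;

            /* as above: on ARMv6, user pages also pick up the ASID bit */
            unsigned long user_pgprot = cp_pte | L_PTE_ASID;

            /* hypothetical protection_map[] entry with C+B set by default */
            unsigned long v = 0xcf;

            /* the loop body: drop C/B, then OR in policy bits plus ASID */
            v = (v & ~(PTE_BUFFERABLE | PTE_CACHEABLE)) | user_pgprot;

            printf("v = %#lx (ASID bit %s)\n", v,
                   (v & L_PTE_ASID) ? "set" : "clear");
            return 0;
    }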
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ ... @@
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version
- bic r2, r1, #0x00000ff0
+ bic r2, r1, #0x000007f0
bic r2, r2, #0x00000003
orr r2, r2, #PTE_EXT_AP0 | 2
tst r1, #L_PTE_WRITE
tstne r1, #L_PTE_DIRTY
orreq r2, r2, #PTE_EXT_APX
tst r1, #L_PTE_USER
- orrne r2, r2, #PTE_EXT_AP1 | PTE_EXT_NG
+ orrne r2, r2, #PTE_EXT_AP1
tstne r2, #PTE_EXT_APX
bicne r2, r2, #PTE_EXT_APX | PTE_EXT_AP0
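Read together with the mm-armv.c hunk, the assembler change narrows the Linux-bits mask from 0xff0 to 0x7f0 so that bit 11 of the Linux PTE survives into r2; since L_PTE_ASID (1 << 11) lines up with the nG (not-global) bit of an ARMv6 extended small page, the old unconditional PTE_EXT_NG for user pages becomes redundant. Below is a C rendering of the routine as patched, as a sketch only: the PTE_EXT_* and L_PTE_USER values are assumptions mirroring the usual ARMv6 layout (AP0 = 1 << 4, AP1 = 1 << 5, APX = 1 << 9), not values taken from this diff:

    #include <stdio.h>

    #define L_PTE_USER      (1 << 4)        /* assumed kernel values */
    #define L_PTE_WRITE     (1 << 5)
    #define L_PTE_DIRTY     (1 << 7)
    #define L_PTE_ASID      (1 << 11)

    #define PTE_EXT_AP0     (1 << 4)        /* assumed ARMv6 hw bits */
    #define PTE_EXT_AP1     (1 << 5)
    #define PTE_EXT_APX     (1 << 9)

    static unsigned long v6_mk_hw_pte(unsigned long linux_pte)
    {
            /* bic r2, r1, #0x000007f0: clear the Linux-only bits 4-10
             * but keep bit 11, so L_PTE_ASID lands on the hw nG bit
             * (the old 0xff0 mask used to wipe it as well) */
            unsigned long hw = linux_pte & ~0x000007f0UL;

            hw &= ~0x00000003UL;            /* bic r2, r2, #0x00000003 */
            hw |= PTE_EXT_AP0 | 2;          /* kernel r/w, small-page type */

            /* orreq path: read-only unless both WRITE and DIRTY are set */
            if (!((linux_pte & L_PTE_WRITE) && (linux_pte & L_PTE_DIRTY)))
                    hw |= PTE_EXT_APX;

            /* user pages get AP1; a kernel read-only page (APX|AP0)
             * becomes read-only to user mode too (AP1 alone) */
            if (linux_pte & L_PTE_USER) {
                    hw |= PTE_EXT_AP1;
                    if (hw & PTE_EXT_APX)
                            hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
            }
            return hw;
    }

    int main(void)
    {
            /* a clean, read-only user page: nG now comes from L_PTE_ASID */
            printf("%#lx\n", v6_mk_hw_pte(L_PTE_USER | L_PTE_ASID));
            return 0;
    }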
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ ... @@
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
+#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
+#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */
#ifndef __ASSEMBLY__
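The positions of the two new flags are the point: bits 10 and 11 of an ARMv6 extended small page descriptor are the hardware S (shared) and nG (not-global) bits, so cpu_v6_set_pte can pass L_PTE_SHARED and L_PTE_ASID through without any shifting. A quick sanity-check sketch; PTE_EXT_SHARED is an illustrative name assumed here, only PTE_EXT_NG appears in this patch:

    #include <assert.h>

    #define L_PTE_SHARED    (1 << 10)       /* shared between CPUs (v6) */
    #define L_PTE_ASID      (1 << 11)       /* non-global (use ASID, v6) */

    #define PTE_EXT_SHARED  (1 << 10)       /* assumed: ARMv6 hw S bit */
    #define PTE_EXT_NG      (1 << 11)       /* assumed: ARMv6 hw nG bit */

    int main(void)
    {
            /* same bit positions: no translation needed in set_pte */
            assert(L_PTE_SHARED == PTE_EXT_SHARED);
            assert(L_PTE_ASID == PTE_EXT_NG);
            return 0;
    }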