/* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
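+ /* Region covered by large pages, tracked so that tlb_flush_page can \
+ force a full flush when part of a large mapping is invalidated. */ \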
+ target_ulong tlb_flush_addr; \
+ target_ulong tlb_flush_mask;
#else
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
#if !defined(CONFIG_USER_ONLY)
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu);
-static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
-{
- if (prot & PAGE_READ)
- prot |= PAGE_EXEC;
- return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
-}
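+/* Note: unlike the old inline wrapper, tlb_set_page does not force PAGE_EXEC
+ for readable pages; callers must now pass PAGE_EXEC explicitly when needed. */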
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int mmu_idx, target_ulong size);
#endif
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
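+ /* -1/0 means "no large page tracked": with a zero mask the large-page
+ check in tlb_flush_page can never match the -1 address. */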
+ env->tlb_flush_addr = -1;
+ env->tlb_flush_mask = 0;
tlb_flush_count++;
}
#if defined(DEBUG_TLB)
printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
+ /* Check if we need to flush due to large pages. */
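+ /* The TLB holds only TARGET_PAGE_SIZE entries and we do not track which
+ of them belong to the large page, so a precise flush is not possible. */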
+ if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+ printf("tlb_flush_page: forced full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+ tlb_flush(env, 1);
+ return;
+ }
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
env->current_tb = NULL;
tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
-/* add a new TLB entry. At most one entry for a given virtual address
- is permitted. Return 0 if OK or 2 if the page could not be mapped
- (can only happen in non SOFTMMU mode for I/O pages or pages
- conflicting with the host address space). */
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
+/* Our TLB does not support large pages, so remember the area covered by
+ large pages and trigger a full TLB flush if these are invalidated. */
+static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
+ target_ulong size)
+{
+ target_ulong mask = ~(size - 1);
+
+ if (env->tlb_flush_addr == (target_ulong)-1) {
+ env->tlb_flush_addr = vaddr & mask;
+ env->tlb_flush_mask = mask;
+ return;
+ }
+ /* Extend the existing region to include the new page.
+ This is a compromise between unnecessary flushes and the cost
+ of maintaining a full variable size TLB. */
+ mask &= env->tlb_flush_mask;
+ while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
+ mask <<= 1;
+ }
+ env->tlb_flush_addr &= mask;
+ env->tlb_flush_mask = mask;
+}
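+
+/* Worked example (hypothetical 32-bit values): a 1MB page at 0x00100000
+ sets tlb_flush_addr = 0x00100000 and tlb_flush_mask = 0xfff00000. Adding
+ a second 1MB page at 0x00400000 widens the mask to 0xff800000 and clears
+ tlb_flush_addr to 0, so any tlb_flush_page below 0x00800000 then forces
+ a full flush. */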
+
+/* Add a new TLB entry. At most one entry for a given virtual address
+ is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
+ supplied size is used only by tlb_flush_page. */
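+/* A large guest page is therefore still entered one TARGET_PAGE_SIZE piece
+ at a time as each piece faults; the recorded size only ensures that a later
+ invalidation of the large page flushes all of those entries. */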
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int mmu_idx, target_ulong size)
{
PhysPageDesc *p;
unsigned long pd;
target_ulong address;
target_ulong code_address;
target_phys_addr_t addend;
- int ret;
CPUTLBEntry *te;
CPUWatchpoint *wp;
target_phys_addr_t iotlb;
+ assert(size >= TARGET_PAGE_SIZE);
+ if (size != TARGET_PAGE_SIZE) {
+ tlb_add_large_page(env, vaddr, size);
+ }
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
pd = IO_MEM_UNASSIGNED;
- vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
+ vaddr, (int)paddr, prot, mmu_idx, size, pd);
#endif
- ret = 0;
address = vaddr;
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
/* IO memory case (romd handled later) */
} else {
te->addr_write = -1;
}
- return ret;
}
#else
/* No fault */
page_size = 1ULL << zbits;
address &= ~(page_size - 1);
+ /* FIXME: page_size should probably be passed to tlb_set_page,
+ and this loop removed. */
for (end = physical + page_size; physical < end; physical += 0x1000) {
- ret = tlb_set_page(env, address, physical, prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address, physical, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
address += 0x1000;
}
+ ret = 0;
break;
#if 0
case 1:
}
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+ int is_user, uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
{
int code;
uint32_t table;
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
ap = (desc >> 10) & 3;
code = 13;
+ *page_size = 1024 * 1024;
} else {
/* Lookup l2 entry. */
if (type == 1) {
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x10000;
break;
case 2: /* 4k page. */
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x1000;
break;
case 3: /* 1k page. */
if (type == 1) {
phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
}
ap = (desc >> 4) & 3;
+ *page_size = 0x400;
break;
default:
/* Never happens, but compiler isn't smart enough to tell. */
}
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+ int is_user, uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
{
int code;
uint32_t table;
if (desc & (1 << 18)) {
/* Supersection. */
phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+ *page_size = 0x1000000;
} else {
/* Section. */
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ *page_size = 0x100000;
}
ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
xn = desc & (1 << 4);
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
xn = desc & (1 << 15);
+ *page_size = 0x10000;
break;
case 2: case 3: /* 4k page. */
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
xn = desc & 1;
+ *page_size = 0x1000;
break;
default:
/* Never happens, but compiler isn't smart enough to tell. */
static inline int get_phys_addr(CPUState *env, uint32_t address,
int access_type, int is_user,
- uint32_t *phys_ptr, int *prot)
+ uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
{
/* Fast Context Switch Extension. */
if (address < 0x02000000)
/* MMU/MPU disabled. */
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE;
+ *page_size = TARGET_PAGE_SIZE;
return 0;
} else if (arm_feature(env, ARM_FEATURE_MPU)) {
+ *page_size = TARGET_PAGE_SIZE;
return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
prot);
} else if (env->cp15.c1_sys & (1 << 23)) {
return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
- prot);
+ prot, page_size);
} else {
return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
- prot);
+ prot, page_size);
}
}
int access_type, int mmu_idx, int is_softmmu)
{
uint32_t phys_addr;
+ target_ulong page_size;
int prot;
int ret, is_user;
is_user = mmu_idx == MMU_USER_IDX;
- ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
+ ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
+ &page_size);
if (ret == 0) {
/* Map a single [sub]page. */
phys_addr &= ~(uint32_t)0x3ff;
address &= ~(uint32_t)0x3ff;
- return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
- is_softmmu);
+ tlb_set_page (env, address, phys_addr, prot | PAGE_EXEC, mmu_idx,
+ page_size);
+ return 0;
}
if (access_type == 2) {
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
uint32_t phys_addr;
+ target_ulong page_size;
int prot;
int ret;
- ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
+ ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
if (ret != 0)
return -1;
tlb_flush(env, 0);
break;
case 1: /* Invalidate single TLB entry. */
-#if 0
- /* ??? This is wrong for large pages and sections. */
- /* As an ugly hack to make linux work we always flush a 4K
- pages. */
- val &= 0xfffff000;
- tlb_flush_page(env, val);
- tlb_flush_page(env, val + 0x400);
- tlb_flush_page(env, val + 0x800);
- tlb_flush_page(env, val + 0xc00);
-#else
- tlb_flush(env, 1);
-#endif
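+ /* Large pages and sections are now covered by the flush-range tracking
+ in tlb_flush_page, so a single page flush is sufficient here. */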
+ tlb_flush_page(env, val & TARGET_PAGE_MASK);
break;
case 2: /* Invalidate on ASID. */
tlb_flush(env, val == 0);
*/
phy = res.phy & ~0x80000000;
prot = res.prot;
- r = tlb_set_page(env, address & TARGET_PAGE_MASK,
- phy, prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK, phy,
+ prot | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
}
if (r > 0)
D_LOG("%s returns %d irqreq=%x addr=%x"
-1 = cannot handle fault
0 = nothing more to do
1 = generate PF fault
- 2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write1, int mmu_idx, int is_softmmu)
{
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
- int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
+ int error_code, is_dirty, prot, page_size, is_write, is_user;
target_phys_addr_t paddr;
uint32_t page_offset;
target_ulong vaddr, virt_addr;
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset;
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
do_fault:
int prot;
address &= TARGET_PAGE_MASK;
- prot = PAGE_READ | PAGE_WRITE;
- return tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
/* Notify CPU of a pending interrupt. Prioritization and vectoring should
DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot));
- r = tlb_set_page(env, vaddr,
- paddr, lu.prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
} else {
env->sregs[SR_EAR] = address;
DMMU(qemu_log("mmu=%d miss v=%x\n", mmu_idx, address));
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
- r = tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
}
return r;
}
qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n",
__func__, address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
- ret = tlb_set_page(env, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
} else if (ret < 0)
#endif
{
}
ret = get_physical_address(env, &ctx, address, rw, access_type);
if (ret == 0) {
- ret = tlb_set_page_exec(env, address & TARGET_PAGE_MASK,
- ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
} else if (ret < 0) {
LOG_MMU_STATE(env);
if (access_type == ACCESS_CODE) {
/* XXX: implement mmu */
phys = address;
- prot = PAGE_READ | PAGE_WRITE;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return tlb_set_page(env, address & TARGET_PAGE_MASK,
- phys & TARGET_PAGE_MASK, prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ phys & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
#endif /* CONFIG_USER_ONLY */
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
- return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
int *prot, int *access_index,
- target_ulong address, int rw, int mmu_idx)
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
{
int access_perms = 0;
target_phys_addr_t pde_ptr;
is_user = mmu_idx == MMU_USER_IDX;
if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+ *page_size = TARGET_PAGE_SIZE;
// Boot mode: instruction fetches are taken from PROM
if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
*physical = env->prom_addr | (address & 0x7ffffULL);
page_offset = (address & TARGET_PAGE_MASK) &
(TARGET_PAGE_SIZE - 1);
}
+ *page_size = TARGET_PAGE_SIZE;
break;
case 2: /* L2 PTE */
page_offset = address & 0x3ffff;
+ *page_size = 0x40000;
}
break;
case 2: /* L1 PTE */
page_offset = address & 0xffffff;
+ *page_size = 0x1000000;
}
}
{
target_phys_addr_t paddr;
target_ulong vaddr;
- int error_code = 0, prot, ret = 0, access_index;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx);
+ address, rw, mmu_idx, &page_size);
if (error_code == 0) {
vaddr = address & TARGET_PAGE_MASK;
paddr &= TARGET_PAGE_MASK;
printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
TARGET_FMT_lx "\n", address, paddr, vaddr);
#endif
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
}
if (env->mmuregs[3]) /* Fault status register */
// switching to normal mode.
vaddr = address & TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
} else {
if (rw & 2)
env->exception_index = TT_TFAULT;
static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
int *prot, int *access_index,
- target_ulong address, int rw, int mmu_idx)
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
{
int is_user = mmu_idx == MMU_USER_IDX;
+ /* ??? We treat everything as a small page, then explicitly flush
+ everything when an entry is evicted. */
+ *page_size = TARGET_PAGE_SIZE;
if (rw == 2)
return get_physical_address_code(env, physical, prot, address,
is_user);
{
target_ulong virt_addr, vaddr;
target_phys_addr_t paddr;
- int error_code = 0, prot, ret = 0, access_index;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx);
+ address, rw, mmu_idx, &page_size);
if (error_code == 0) {
virt_addr = address & TARGET_PAGE_MASK;
vaddr = virt_addr + ((address & TARGET_PAGE_MASK) &
printf("Translate at 0x%" PRIx64 " -> 0x%" PRIx64 ", vaddr 0x%" PRIx64
"\n", address, paddr, vaddr);
#endif
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
}
// XXX
return 1;
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
target_phys_addr_t phys_addr;
+ target_ulong page_size;
int prot, access_index;
if (get_physical_address(env, &phys_addr, &prot, &access_index, addr, 2,
- MMU_KERNEL_IDX) != 0)
+ MMU_KERNEL_IDX, &page_size) != 0)
if (get_physical_address(env, &phys_addr, &prot, &access_index, addr,
- 0, MMU_KERNEL_IDX) != 0)
+ 0, MMU_KERNEL_IDX, &page_size) != 0)
return -1;
if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED)
return -1;