/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
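/* Illustrative instance (not part of the original source): including this
 * header with SHIFT == 2 gives DATA_SIZE == 4, SUFFIX == l and
 * DATA_TYPE == uint32_t, so the glue() expansions below produce the 32-bit
 * family of helpers.  SHIFT itself is defined by the including file. */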
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
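/* Example of the widening above: for a 2-byte load on a 64-bit host,
 * WORD_TYPE is tcg_target_ulong (uint64_t), so the unsigned helper hands
 * TCG a full host register with the uint16_t value zero-extended into it. */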
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
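/* Example: on a big-endian target, TGT_LE(X) byte-swaps, so a value that the
 * io helpers return in target order can be converted to the value a
 * little-endian interpretation of those bytes would give -- which is what
 * the helper_le_* entry points below must return -- and vice versa. */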
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
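/* For instance, with DATA_SIZE == 4 and assuming the usual MMUSUFFIX of
 * _mmu supplied by the including file, the names above expand to
 * helper_le_ldul_mmu, helper_be_ldul_mmu, helper_le_ldsl_mmu, and so on.
 * Byte accesses (DATA_SIZE == 1) have no endianness, so both the le and be
 * names alias the endian-neutral helper_ret_* variants. */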
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
/* macro to check the victim tlb */
#define VICTIM_TLB_HIT(ty)                                                    \
({                                                                            \
    /* we are about to do a page table walk. our last hope is the             \
     * victim tlb. try to refill from the victim tlb before walking the       \
     * page table. */                                                         \
    int vidx;                                                                 \
    CPUIOTLBEntry tmpiotlb;                                                   \
    CPUTLBEntry tmptlb;                                                       \
    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
            /* found entry in victim tlb, swap tlb and iotlb */               \
            tmptlb = env->tlb_table[mmu_idx][index];                          \
            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
            tmpiotlb = env->iotlb[mmu_idx][index];                            \
            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
            break;                                                            \
        }                                                                     \
    }                                                                         \
    /* return true when there is a vtlb hit, i.e. vidx >= 0 */                \
    (vidx >= 0);                                                              \
})
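/* The scan above is a linear walk of the victim TLB (CPU_VTLB_SIZE entries,
 * 8 at the time of writing).  Swapping the hit entry with the direct-mapped
 * slot keeps the most recently used translation in the main table, so the
 * next access to the same page hits on the fast path. */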
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif
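/* Note on cpu_io_recompile() above, roughly: when icount is active, TCG only
 * permits I/O from the last instruction of a translation block.  If an I/O
 * access is reached earlier, the block is retranslated so that it ends at
 * the offending instruction, and execution is restarted from there. */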
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
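        /* Worked example of the combine above (illustrative): for a 4-byte
         * load at offset 2 within an aligned quantum, shift == 16, so the
         * two high bytes of res1 become bits 0..15 of the result and the
         * two low bytes of res2 become bits 16..31 -- exactly the
         * little-endian value of the bytes at addr..addr+3. */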
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
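        /* Worked example of the combine above (illustrative): for a 4-byte
         * load at offset 2, shift == 16, so the two low bytes of res1 move
         * up to bits 16..31 and the two high bytes of res2 down to bits
         * 0..15, reassembling the big-endian value of addr..addr+3. */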
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on a 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
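/* Example: on a 64-bit host a 2-byte signed load of 0x8000 is cast to
 * int16_t (-32768) and then implicitly converted to the unsigned WORD_TYPE,
 * yielding 0xffffffffffff8000 -- a properly sign-extended host register. */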
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }
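    /* Worked example of the loop above (illustrative): storing the 4-byte
     * value 0x11223344 issues byte stores 0x11 to addr+3, 0x22 to addr+2,
     * 0x33 to addr+1 and 0x44 to addr, i.e. little-endian order, with each
     * byte store doing its own TLB lookup and possible fault. */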
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }
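    /* Worked example of the loop above (illustrative): storing the 4-byte
     * value 0x11223344 issues 0x44 to addr+3, 0x33 to addr+2, 0x22 to
     * addr+1 and 0x11 to addr -- big-endian byte order, again with one TLB
     * lookup per byte. */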
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
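/* Typical use of probe_write() (illustrative): a target helper that must not
 * be interrupted mid-update -- e.g. a multi-word store or an atomic sequence
 * -- probes the whole destination range up front, so that any write fault is
 * raised before memory has been partially modified. */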
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name