#if !defined(CONFIG_USER_ONLY)
/* Flags stored in the low bits of the TLB virtual address. These are
- defined so that fast path ram access is all zeros. */
+ * defined so that fast path ram access is all zeros.
+ * All of the flags must lie between TARGET_PAGE_BITS and the
+ * largest supported address alignment bit.
+ */
/* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK (1 << 3)
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
/* Set if TLB entry references a clean RAM page. The iotlb entry will
contain the page physical address. */
-#define TLB_NOTDIRTY (1 << 4)
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
/* Set if TLB entry is an IO callback. */
-#define TLB_MMIO (1 << 5)
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
+
+/* Use this mask in a TCG backend to check that a requested alignment
+ * mask does not intersect the TLB flag bits.
+ */
+#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
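For concreteness (illustrative only, not part of the patch): with an assumed TARGET_PAGE_BITS of 12, the flags occupy bits 11, 10 and 9, well clear of the page-number bits. A compile-time sanity check using QEMU's existing QEMU_BUILD_BUG_ON could express the invariant:

/* Sketch: the flag bits must stay inside the in-page offset.
 * With TARGET_PAGE_BITS == 12:
 *   TLB_INVALID_MASK == 0x800, TLB_NOTDIRTY == 0x400, TLB_MMIO == 0x200,
 *   so TLB_FLAGS_MASK == 0xe00.
 */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TARGET_PAGE_MASK);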
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
+
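Because the memop now carries the alignment size, the check runs once before the TLB compare instead of being repeated on each path below. A minimal sketch of the predicate (hypothetical helper, not part of the patch), with a worked example: a_bits == 3 means an 8-byte alignment requirement, so for addr == 0x1004 the mask is 0x7, addr & 0x7 == 0x4, and cpu_unaligned_access() is raised without consulting the TLB.

/* Illustrative helper only; a_bits <= 0 means no check is needed. */
static inline bool softmmu_addr_misaligned(target_ulong addr, int a_bits)
{
    return a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0;
}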
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(ADDR_READ)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
return res;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(ADDR_READ)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
return res;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
return res;
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(addr_write)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
return;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ int a_bits = get_alignment_bits(get_memop(oi));
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
+ if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
if (!VICTIM_TLB_HIT(addr_write)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
return;
}
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
int tlb_offset = is_read ?
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
- int s_mask = (1 << (opc & MO_SIZE)) - 1;
+ int a_bits = get_alignment_bits(opc);
TCGReg base = TCG_AREG0, x3;
uint64_t tlb_mask;
/* For aligned accesses, we check the first byte and include the alignment
bits within the address. For unaligned access, we check that we don't
cross pages using the address of the last byte of the access. */
- if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
- tlb_mask = TARGET_PAGE_MASK | s_mask;
+ if (a_bits >= 0) {
+ /* A byte access or an alignment check is required */
+ tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
x3 = addr_reg;
} else {
tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
- TCG_REG_X3, addr_reg, s_mask);
+ TCG_REG_X3, addr_reg, (1 << (opc & MO_SIZE)) - 1);
tlb_mask = TARGET_PAGE_MASK;
x3 = TCG_REG_X3;
}
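A worked example of the two branches (assuming a 64-bit guest and TARGET_PAGE_BITS == 12; numbers are illustrative only):

/* MO_32 | MO_ALIGN:  a_bits == 2, x3 == addr_reg,
 *                    tlb_mask == 0xfffffffffffff003, so the TLB compare
 *                    itself fails whenever either low address bit is set.
 * MO_64 | MO_UNALN:  a_bits < 0, so x3 == addr_reg + 7 and
 *                    tlb_mask == 0xfffffffffffff000: the compare uses the
 *                    page of the last byte, catching page-crossing accesses
 *                    without requiring alignment.
 */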
TCGType ttype = TCG_TYPE_I32;
TCGType tlbtype = TCG_TYPE_I32;
int trexw = 0, hrexw = 0, tlbrexw = 0;
- int s_mask = (1 << (opc & MO_SIZE)) - 1;
- bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
+ int a_bits = get_alignment_bits(opc);
+ target_ulong tlb_mask;
if (TCG_TARGET_REG_BITS == 64) {
if (TARGET_LONG_BITS == 64) {
}
tcg_out_mov(s, tlbtype, r0, addrlo);
- if (aligned) {
+ if (a_bits >= 0) {
+ /* A byte access or an alignment check is required */
tcg_out_mov(s, ttype, r1, addrlo);
+ tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
} else {
/* For unaligned access check that we don't cross pages using
the page address of the last byte. */
- tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
+ tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo,
+ (1 << (opc & MO_SIZE)) - 1);
+ tlb_mask = TARGET_PAGE_MASK;
}
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- tgen_arithi(s, ARITH_AND + trexw, r1,
- TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
+ tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
tgen_arithi(s, ARITH_AND + tlbrexw, r0,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
TCGReg base = TCG_AREG0;
TCGMemOp s_bits = opc & MO_SIZE;
+ int a_bits = get_alignment_bits(opc);
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 64) {
* the bottom bits and thus trigger a comparison failure on
* unaligned accesses
*/
+ if (a_bits < 0) {
+ a_bits = s_bits;
+ }
tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
- (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
- } else if (s_bits) {
- /* > byte access, we need to handle alignment */
- if ((opc & MO_AMASK) == MO_ALIGN) {
+ (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+ } else if (a_bits) {
+ /* More than a byte access: we need to handle alignment */
+ if (a_bits > 0) {
/* Alignment required by the front-end, same as 32-bits */
tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
- 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
+ 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
} else {
/* We support unaligned accesses, we need to make sure we fail
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
int mem_index, bool is_ld)
{
- int s_mask = (1 << (opc & MO_SIZE)) - 1;
+ int a_bits = get_alignment_bits(opc);
int ofs, a_off;
uint64_t tlb_mask;
/* For aligned accesses, we check the first byte and include the alignment
bits within the address. For unaligned access, we check that we don't
cross pages using the address of the last byte of the access. */
- if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+ if (a_bits >= 0) {
+ /* A byte access or an alignment check is required */
a_off = 0;
- tlb_mask = TARGET_PAGE_MASK | s_mask;
+ tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
} else {
- a_off = s_mask;
+ a_off = (1 << (opc & MO_SIZE)) - 1;
tlb_mask = TARGET_PAGE_MASK;
}
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
+ /* Trigger the asserts within get_alignment_bits() as early as possible. */
+ (void)get_alignment_bits(op);
+
switch (op & MO_SIZE) {
case MO_8:
op &= ~MO_BSWAP;
[MO_BEQ] = "beq",
};
+static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
+#ifdef ALIGNED_ONLY
+ [MO_UNALN >> MO_ASHIFT] = "un+",
+ [MO_ALIGN >> MO_ASHIFT] = "",
+#else
+ [MO_UNALN >> MO_ASHIFT] = "",
+ [MO_ALIGN >> MO_ASHIFT] = "al+",
+#endif
+ [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
+ [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
+ [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
+ [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
+ [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
+ [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
+};
+
void tcg_dump_ops(TCGContext *s)
{
char buf[128];
if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
qemu_log(",$0x%x,%u", op, ix);
} else {
- const char *s_al = "", *s_op;
- if (op & MO_AMASK) {
- if ((op & MO_AMASK) == MO_ALIGN) {
- s_al = "al+";
- } else {
- s_al = "un+";
- }
- }
+ const char *s_al, *s_op;
+ s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
qemu_log(",%s%s,%u", s_al, s_op, ix);
}
#endif
+#ifdef CONFIG_DEBUG_TCG
+# define tcg_debug_assert(X) do { assert(X); } while (0)
+#elif QEMU_GNUC_PREREQ(4, 5)
+# define tcg_debug_assert(X) \
+ do { if (!(X)) { __builtin_unreachable(); } } while (0)
+#else
+# define tcg_debug_assert(X) do { (void)(X); } while (0)
+#endif
+
typedef struct TCGRelocation {
struct TCGRelocation *next;
int type;
#endif
/* MO_UNALN accesses are never checked for alignment.
- MO_ALIGN accesses will result in a call to the CPU's
- do_unaligned_access hook if the guest address is not aligned.
- The default depends on whether the target CPU defines ALIGNED_ONLY. */
- MO_AMASK = 16,
+ * MO_ALIGN accesses will result in a call to the CPU's
+ * do_unaligned_access hook if the guest address is not aligned.
+ * The default depends on whether the target CPU defines ALIGNED_ONLY.
+ * Some architectures (e.g. ARMv8) need an address that is aligned
+ * to a size larger than the size of the memory access itself.
+ * The existing, essentially free alignment check in QEMU is enough
+ * to support this; all that is missing is a way to specify the
+ * alignment size.
+ * MO_ALIGN requests natural alignment
+ * (i.e. the alignment size equals the size of the memory access).
+ * Note that the alignment size must be equal to or greater
+ * than the access size.
+ * There are three options:
+ * - an alignment to the size of an access (MO_ALIGN);
+ * - an alignment to a specified size that is equal to or greater than
+ *   an access size (MO_ALIGN_x where 'x' is a size in bytes);
+ * - unaligned access permitted (MO_UNALN).
+ */
+ MO_ASHIFT = 4,
+ MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
MO_ALIGN = 0,
MO_UNALN = MO_AMASK,
MO_ALIGN = MO_AMASK,
MO_UNALN = 0,
#endif
+ MO_ALIGN_2 = 1 << MO_ASHIFT,
+ MO_ALIGN_4 = 2 << MO_ASHIFT,
+ MO_ALIGN_8 = 3 << MO_ASHIFT,
+ MO_ALIGN_16 = 4 << MO_ASHIFT,
+ MO_ALIGN_32 = 5 << MO_ASHIFT,
+ MO_ALIGN_64 = 6 << MO_ASHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
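With MO_ALIGN_x a front end can request an over-aligned access directly in the memop. A minimal usage sketch (illustrative; tcg_gen_qemu_ld_i64() and MO_TEQ are existing TCG interfaces, and val/addr/mem_idx stand for whatever the front end already has in hand):

/* A 64-bit target-endian load that must be 16-byte aligned, e.g. for an
 * ARMv8-style access that is more strictly aligned than its element size.
 * A misaligned guest address reaches the CPU's do_unaligned_access hook.
 */
tcg_gen_qemu_ld_i64(val, addr, mem_idx, MO_TEQ | MO_ALIGN_16);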
+/**
+ * get_alignment_bits
+ * @memop: TCGMemOp value
+ *
+ * Extract the alignment size from the memop.
+ *
+ * Returns: 0 in case of a byte access (which is always aligned);
+ *          a positive value: the number of required alignment bits;
+ *          a negative value: unaligned access is permitted and this
+ *          is not a byte access.
+ */
+static inline int get_alignment_bits(TCGMemOp memop)
+{
+ int a = memop & MO_AMASK;
+ int s = memop & MO_SIZE;
+ int r;
+
+ if (a == MO_UNALN) {
+ /* Negative value if unaligned access is permitted,
+ * or zero in case of a byte access.
+ */
+ return -s;
+ } else if (a == MO_ALIGN) {
+ /* Natural alignment: the alignment size equals the access size */
+ r = s;
+ } else {
+ /* A specific alignment size: it must be equal to or greater
+ * than the access size.
+ */
+ r = a >> MO_ASHIFT;
+ tcg_debug_assert(r >= s);
+ }
+#if defined(CONFIG_SOFTMMU)
+ /* The requested alignment cannot overlap the TLB flags. */
+ tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
+#endif
+ return r;
+}
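To make the return-value convention concrete, a usage sketch (not part of the patch), assuming a target without ALIGNED_ONLY so that MO_UNALN is 0 and MO_ALIGN equals MO_AMASK:

/* Illustrative only: expected results of get_alignment_bits(). */
static inline void check_alignment_bits_examples(void)
{
    tcg_debug_assert(get_alignment_bits(MO_8) == 0);              /* byte access */
    tcg_debug_assert(get_alignment_bits(MO_32 | MO_UNALN) == -2); /* unaligned 32-bit */
    tcg_debug_assert(get_alignment_bits(MO_32 | MO_ALIGN) == 2);  /* natural alignment */
    tcg_debug_assert(get_alignment_bits(MO_16 | MO_ALIGN_8) == 3);/* 16-bit access, 8-byte aligned */
}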
+
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables. Using pointer types
abort();\
} while (0)
-#ifdef CONFIG_DEBUG_TCG
-# define tcg_debug_assert(X) do { assert(X); } while (0)
-#elif QEMU_GNUC_PREREQ(4, 5)
-# define tcg_debug_assert(X) \
- do { if (!(X)) { __builtin_unreachable(); } } while (0)
-#else
-# define tcg_debug_assert(X) do { (void)(X); } while (0)
-#endif
-
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
#if UINTPTR_MAX == UINT32_MAX