/* simulator.c -- Interface for the AArch64 simulator.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2017 Free Software Foundation, Inc.
Contributed by Red Hat.
" exe addr %" PRIx64, \
__LINE__, aarch64_get_PC (cpu)); \
if (! TRACE_ANY_P (cpu)) \
- { \
- sim_io_eprintf (CPU_STATE (cpu), "SIM Error: Unimplemented instruction: "); \
- trace_disasm (CPU_STATE (cpu), cpu, aarch64_get_PC (cpu)); \
- } \
+ sim_io_eprintf (CPU_STATE (cpu), "SIM Error: Unimplemented instruction: %#08x\n", \
+ aarch64_get_instr (cpu)); \
sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),\
sim_stopped, SIM_SIGABRT); \
} \
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s8
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s8
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_mem_u16
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s16
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s16
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ offset));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u32 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
aarch64_get_reg_u32 (cpu, rd, NO_SP));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u64 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
aarch64_get_reg_u64 (cpu, rd, NO_SP));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u8 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
aarch64_get_reg_u8 (cpu, rd, NO_SP));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u16 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
aarch64_get_reg_u16 (cpu, rd, NO_SP));
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_mem_u32
(cpu, aarch64_get_PC (cpu) + offset * 4));
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_mem_u64
(cpu, aarch64_get_PC (cpu) + offset * 4));
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_mem_s32
(cpu, aarch64_get_PC (cpu) + offset * 4));
{
unsigned int rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u32 (cpu, rd, 0,
aarch64_get_mem_u32
(cpu, aarch64_get_PC (cpu) + offset * 4));
{
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, st, 0,
aarch64_get_mem_u64
(cpu, aarch64_get_PC (cpu) + offset * 4));
uint64_t addr = aarch64_get_PC (cpu) + offset * 4;
FRegister a;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_get_mem_long_double (cpu, addr, & a);
aarch64_set_FP_long_double (cpu, st, a);
}
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32 (cpu, address));
if (wb == Post)
address += offset;
unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u8 (cpu, rd, 0, aarch64_get_mem_u32 (cpu, addr));
}
unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 16);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u16 (cpu, rd, 0, aarch64_get_mem_u16 (cpu, addr));
}
unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 32);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u32 (cpu, rd, 0, aarch64_get_mem_u32 (cpu, addr));
}
unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 64);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_mem_u64 (cpu, addr));
}
unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 128);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_mem_u64 (cpu, addr));
aarch64_set_vec_u64 (cpu, rd, 1, aarch64_get_mem_u64 (cpu, addr + 8));
}
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 32, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32
(cpu, address + displacement));
}
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_mem_u64 (cpu, address));
if (wb == Post)
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_get_mem_long_double (cpu, address, & a);
aarch64_set_FP_long_double (cpu, st, a);
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32 (cpu, address));
if (wb == Post)
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 32, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP,
aarch64_get_mem_u32 (cpu, address + displacement));
}
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64 (cpu, address));
if (wb == Post)
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 64, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP,
aarch64_get_mem_u64 (cpu, address + displacement));
}
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be
there is no scaling required for a byte load. */
aarch64_set_reg_u64 (cpu, rt, NO_SP,
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8 (cpu, address));
if (wb == Post)
int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
extension);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* There is no scaling required for a byte load. */
aarch64_set_reg_u64 (cpu, rt, NO_SP,
aarch64_get_mem_u8 (cpu, address + displacement));
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
val = aarch64_get_mem_s8 (cpu, address);
aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
extension);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* There is no scaling required for a byte load. */
aarch64_set_reg_s64 (cpu, rt, NO_SP,
aarch64_get_mem_s8 (cpu, address + displacement));
unsigned rt = INSTR (4, 0);
uint32_t val;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
val = aarch64_get_mem_u16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 16));
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u32 (cpu, rt, NO_SP, aarch64_get_mem_u16 (cpu, address));
if (wb == Post)
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 16, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u32 (cpu, rt, NO_SP,
aarch64_get_mem_u16 (cpu, address + displacement));
}
unsigned rt = INSTR (4, 0);
int32_t val;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
val = aarch64_get_mem_s16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 16));
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s32 (cpu, rt, NO_SP,
(int32_t) aarch64_get_mem_s16 (cpu, address));
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 16, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s32 (cpu, rt, NO_SP,
(int32_t) aarch64_get_mem_s16
(cpu, address + displacement));
unsigned rt = INSTR (4, 0);
int64_t val;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
val = aarch64_get_mem_s16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 16));
if (rn == rt && wb != NoWriteBack)
HALT_UNALLOC;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
if (wb != Post)
uint64_t displacement = OPT_SCALE (extended, 16, scaling);
int64_t val;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
val = aarch64_get_mem_s16 (cpu, address + displacement);
aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
}
unsigned rt = INSTR (4, 0);
int64_t val;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
val = aarch64_get_mem_s32 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 32));
/* The target register may not be SP but the source may be. */
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s32 (cpu, address));
if (wb == Post)
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 32, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s64 (cpu, rt, NO_SP,
aarch64_get_mem_s32 (cpu, address + displacement));
}
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
aarch64_set_mem_u32 (cpu, (aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 32)),
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u32 (cpu, address, aarch64_get_reg_u32 (cpu, rt, NO_SP));
if (wb == Post)
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 32, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u32 (cpu, address + displacement,
aarch64_get_reg_u64 (cpu, rt, NO_SP));
}
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u64 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 64),
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u64 (cpu, address, aarch64_get_reg_u64 (cpu, rt, NO_SP));
if (wb == Post)
extension);
uint64_t displacement = OPT_SCALE (extended, 64, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u64 (cpu, address + displacement,
aarch64_get_reg_u64 (cpu, rt, NO_SP));
}
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be.
There is no scaling required for a byte load. */
aarch64_set_mem_u8 (cpu,
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u8 (cpu, address, aarch64_get_reg_u8 (cpu, rt, NO_SP));
if (wb == Post)
int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
extension);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* There is no scaling required for a byte load. */
aarch64_set_mem_u8 (cpu, address + displacement,
aarch64_get_reg_u8 (cpu, rt, NO_SP));
unsigned rn = INSTR (9, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The target register may not be SP but the source may be. */
aarch64_set_mem_u16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 16),
if (wb != Post)
address += offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u16 (cpu, address, aarch64_get_reg_u16 (cpu, rt, NO_SP));
if (wb == Post)
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 16, scaling);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_mem_u16 (cpu, address + displacement,
aarch64_get_reg_u16 (cpu, rt, NO_SP));
}
/* int ordered = INSTR (15, 15); */
/* int exclusive = ! INSTR (23, 23); */
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (size)
{
case 0:
case 3: aarch64_set_mem_u64 (cpu, address, data); break;
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rs, NO_SP, 0); /* Always exclusive... */
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK) + aimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + aimm);
}
aarch64_set_CPSR (cpu, flags);
}
+#define NEG(a) (((a) & signbit) == signbit)
+#define POS(a) (((a) & signbit) == 0)
+
static void
set_flags_for_add64 (sim_cpu *cpu, uint64_t value1, uint64_t value2)
{
- int64_t sval1 = value1;
- int64_t sval2 = value2;
- uint64_t result = value1 + value2;
- int64_t sresult = sval1 + sval2;
- uint32_t flags = 0;
+ uint64_t result = value1 + value2;
+ uint32_t flags = 0;
+ uint64_t signbit = 1ULL << 63;
if (result == 0)
flags |= Z;
- if (result & (1ULL << 63))
+ if (NEG (result))
flags |= N;
- if (sval1 < 0)
- {
- if (sval2 < 0)
- {
- /* Negative plus a negative. Overflow happens if
- the result is greater than either of the operands. */
- if (sresult > sval1 || sresult > sval2)
- flags |= V;
- }
- /* else Negative plus a positive. Overflow cannot happen. */
- }
- else /* value1 is +ve. */
- {
- if (sval2 < 0)
- {
- /* Overflow can only occur if we computed "0 - MININT". */
- if (sval1 == 0 && sval2 == (1LL << 63))
- flags |= V;
- }
- else
- {
- /* Postive plus positive - overflow has happened if the
- result is smaller than either of the operands. */
- if (result < value1 || result < value2)
- flags |= V | C;
- }
- }
+ if ( (NEG (value1) && NEG (value2))
+ || (NEG (value1) && POS (result))
+ || (NEG (value2) && POS (result)))
+ flags |= C;
+
+ if ( (NEG (value1) && NEG (value2) && POS (result))
+ || (POS (value1) && POS (value2) && NEG (result)))
+ flags |= V;
aarch64_set_CPSR (cpu, flags);
}
-#define NEG(a) (((a) & signbit) == signbit)
-#define POS(a) (((a) & signbit) == 0)
-
static void
set_flags_for_sub32 (sim_cpu *cpu, uint32_t value1, uint32_t value2)
{
/* TODO : do we need to worry about signs here? */
int32_t value1 = aarch64_get_reg_s32 (cpu, rn, SP_OK);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + aimm);
set_flags_for_add32 (cpu, value1, aimm);
}
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint64_t value2 = aimm;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
set_flags_for_add64 (cpu, value1, value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK) - aimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK) - aimm);
}
uint32_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint32_t value2 = aimm;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
set_flags_for_sub32 (cpu, value1, value2);
}
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint32_t value2 = aimm;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
set_flags_for_sub64 (cpu, value1, value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
+ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
+ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
set_flags_for_add32 (cpu, value1, value2);
}
uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
set_flags_for_add64 (cpu, value1, value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
- shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
- shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
set_flags_for_sub32 (cpu, value1, value2);
}
uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
set_flags_for_sub64 (cpu, value1, value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK)
+ (extreg32 (cpu, rm, extension) << shift));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ (extreg64 (cpu, rm, extension) << shift));
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
uint32_t value2 = extreg32 (cpu, rm, extension) << shift;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
set_flags_for_add32 (cpu, value1, value2);
}
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint64_t value2 = extreg64 (cpu, rm, extension) << shift;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
set_flags_for_add64 (cpu, value1, value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK)
- (extreg32 (cpu, rm, extension) << shift));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK)
- (extreg64 (cpu, rm, extension) << shift));
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
uint32_t value2 = extreg32 (cpu, rm, extension) << shift;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
set_flags_for_sub32 (cpu, value1, value2);
}
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint64_t value2 = extreg64 (cpu, rm, extension) << shift;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
set_flags_for_sub64 (cpu, value1, value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
+ aarch64_get_reg_u32 (cpu, rm, NO_SP)
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
+ aarch64_get_reg_u64 (cpu, rm, NO_SP)
uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP);
uint32_t carry = IS_SET (C);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2 + carry);
set_flags_for_add32 (cpu, value1, value2 + carry);
}
uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
uint64_t carry = IS_SET (C);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2 + carry);
set_flags_for_add64 (cpu, value1, value2 + carry);
}
unsigned rn = INSTR (9, 5); /* ngc iff rn == 31. */
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
- aarch64_get_reg_u32 (cpu, rm, NO_SP)
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
- aarch64_get_reg_u64 (cpu, rm, NO_SP)
uint32_t carry = IS_SET (C);
uint32_t result = value1 - value2 + 1 - carry;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
set_flags_for_sub32 (cpu, value1, value2 + 1 - carry);
}
uint64_t carry = IS_SET (C);
uint64_t result = value1 - value2 + 1 - carry;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
set_flags_for_sub64 (cpu, value1, value2 + 1 - carry);
}
NYI_assert (10, 10, 0);
NYI_assert (4, 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (! testConditionCode (cpu, INSTR (15, 12)))
{
aarch64_set_CPSR (cpu, INSTR (3, 0));
if (INSTR (20, 16) != vs)
HALT_NYI;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (30, 30))
aarch64_set_vec_u64 (cpu, vd, 1, aarch64_get_vec_u64 (cpu, vs, 1));
NYI_assert (29, 21, 0x070);
NYI_assert (17, 10, 0x0F);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (20, 18))
{
case 0x2:
NYI_assert (31, 21, 0x270);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (16, 16))
{
index = INSTR (20, 17);
NYI_assert (29, 21, 0x070);
NYI_assert (15, 10, 0x01);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (16, 16))
{
index = INSTR (20, 17);
NYI_assert (29, 21, 0x070);
NYI_assert (12, 10, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 16 : 8); i++)
{
unsigned int selector = aarch64_get_vec_u8 (cpu, vm, i);
NYI_assert (29, 24, 0x0E);
NYI_assert (13, 10, 0xA);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (29, 20, 0x0E0);
NYI_assert (15, 10, 0x03);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (19, 16))
{
case 1:
uint64_t val_n1 = aarch64_get_vec_u64 (cpu, vn, 0);
uint64_t val_n2 = aarch64_get_vec_u64 (cpu, vn, 1);
- uint64_t val1 = 0;
- uint64_t val2 = 0;
+ uint64_t val1;
+ uint64_t val2;
- uint64_t input1 = upper ? val_n1 : val_m1;
- uint64_t input2 = upper ? val_n2 : val_m2;
- unsigned i;
+ uint64_t input2 = full ? val_n2 : val_m1;
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 21, 0);
NYI_assert (15, 15, 0);
NYI_assert (13, 10, 6);
- switch (INSTR (23, 23))
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ switch (INSTR (23, 22))
{
case 0:
- for (i = 0; i < 8; i++)
+ val1 = (val_n1 >> (upper * 8)) & 0xFFULL;
+ val1 |= (val_n1 >> ((upper * 8) + 8)) & 0xFF00ULL;
+ val1 |= (val_n1 >> ((upper * 8) + 16)) & 0xFF0000ULL;
+ val1 |= (val_n1 >> ((upper * 8) + 24)) & 0xFF000000ULL;
+
+ val1 |= (input2 << (32 - (upper * 8))) & 0xFF00000000ULL;
+ val1 |= (input2 << (24 - (upper * 8))) & 0xFF0000000000ULL;
+ val1 |= (input2 << (16 - (upper * 8))) & 0xFF000000000000ULL;
+ val1 |= (input2 << (8 - (upper * 8))) & 0xFF00000000000000ULL;
+
+ if (full)
{
- val1 |= (input1 >> (i * 8)) & (0xFFULL << (i * 8));
- val2 |= (input2 >> (i * 8)) & (0xFFULL << (i * 8));
+ val2 = (val_m1 >> (upper * 8)) & 0xFFULL;
+ val2 |= (val_m1 >> ((upper * 8) + 8)) & 0xFF00ULL;
+ val2 |= (val_m1 >> ((upper * 8) + 16)) & 0xFF0000ULL;
+ val2 |= (val_m1 >> ((upper * 8) + 24)) & 0xFF000000ULL;
+
+ val2 |= (val_m2 << (32 - (upper * 8))) & 0xFF00000000ULL;
+ val2 |= (val_m2 << (24 - (upper * 8))) & 0xFF0000000000ULL;
+ val2 |= (val_m2 << (16 - (upper * 8))) & 0xFF000000000000ULL;
+ val2 |= (val_m2 << (8 - (upper * 8))) & 0xFF00000000000000ULL;
}
break;
case 1:
- for (i = 0; i < 4; i++)
+ val1 = (val_n1 >> (upper * 16)) & 0xFFFFULL;
+ val1 |= (val_n1 >> ((upper * 16) + 16)) & 0xFFFF0000ULL;
+
+ val1 |= (input2 << (32 - (upper * 16))) & 0xFFFF00000000ULL;;
+ val1 |= (input2 << (16 - (upper * 16))) & 0xFFFF000000000000ULL;
+
+ if (full)
{
- val1 |= (input1 >> (i * 16)) & (0xFFFFULL << (i * 16));
- val2 |= (input2 >> (i * 16)) & (0xFFFFULL << (i * 16));
+ val2 = (val_m1 >> (upper * 16)) & 0xFFFFULL;
+ val2 |= (val_m1 >> ((upper * 16) + 16)) & 0xFFFF0000ULL;
+
+ val2 |= (val_m2 << (32 - (upper * 16))) & 0xFFFF00000000ULL;
+ val2 |= (val_m2 << (16 - (upper * 16))) & 0xFFFF000000000000ULL;
}
break;
case 2:
- val1 = ((input1 & 0xFFFFFFFF) | ((input1 >> 32) & 0xFFFFFFFF00000000ULL));
- val2 = ((input2 & 0xFFFFFFFF) | ((input2 >> 32) & 0xFFFFFFFF00000000ULL));
+ val1 = (val_n1 >> (upper * 32)) & 0xFFFFFFFF;
+ val1 |= (input2 << (32 - (upper * 32))) & 0xFFFFFFFF00000000ULL;
+
+ if (full)
+ {
+ val2 = (val_m1 >> (upper * 32)) & 0xFFFFFFFF;
+ val2 |= (val_m2 << (32 - (upper * 32))) & 0xFFFFFFFF00000000ULL;
+ }
+ break;
case 3:
- val1 = input1;
- val2 = input2;
- break;
+ if (! full)
+ HALT_UNALLOC;
+
+ val1 = upper ? val_n2 : val_n1;
+ val2 = upper ? val_m2 : val_m1;
+ break;
}
aarch64_set_vec_u64 (cpu, vd, 0, val1);
NYI_assert (15, 15, 0);
NYI_assert (13, 10, 0xE);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 23))
{
case 0:
NYI_assert (29, 19, 0x1E0);
NYI_assert (11, 10, 1);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (15, 12))
{
case 0x0: /* 32-bit, no shift. */
case 0x8: /* 16-bit, no shift. */
for (i = 0; i < (full ? 8 : 4); i++)
aarch64_set_vec_u16 (cpu, vd, i, val);
- /* Fall through. */
+ break;
+
case 0xd: /* 32-bit, mask shift by 16. */
val <<= 8;
val |= 0xFF;
NYI_assert (29, 19, 0x5E0);
NYI_assert (11, 10, 1);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (15, 12))
{
case 0x0: /* 32-bit, no shift. */
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0x82E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0xC6E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
val += aarch64_get_vec_u8 (cpu, vm, i);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val);
+ aarch64_set_vec_u64 (cpu, rd, 0, val);
return;
case 1:
for (i = 0; i < (full ? 8 : 4); i++)
val += aarch64_get_vec_u16 (cpu, vm, i);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val);
+ aarch64_set_vec_u64 (cpu, rd, 0, val);
return;
case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
+ if (! full)
+ HALT_UNALLOC;
+ for (i = 0; i < 4; i++)
val += aarch64_get_vec_u32 (cpu, vm, i);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val);
+ aarch64_set_vec_u64 (cpu, rd, 0, val);
return;
case 3:
- if (! full)
- HALT_UNALLOC;
- val = aarch64_get_vec_u64 (cpu, vm, 0);
- val += aarch64_get_vec_u64 (cpu, vm, 1);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val);
- return;
+ HALT_UNALLOC;
}
}
NYI_assert (17, 14, 0);
NYI_assert (12, 10, 7);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (13, 13) == 1)
{
if (INSTR (18, 18) == 1)
NYI_assert (28, 24, 0x0E);
NYI_assert (15, 10, 0x30);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* NB: Read source values before writing results, in case
the source and destination vectors are the same. */
switch (INSTR (23, 22))
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x35);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (23, 23))
{
if (INSTR (22, 22))
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x21);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x27);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
- DO_VEC_WIDENING_MUL (full ? 16 : 8, uint16_t, u8, u16);
+ DO_VEC_WIDENING_MUL (full ? 16 : 8, uint8_t, u8, u8);
return;
case 1:
- DO_VEC_WIDENING_MUL (full ? 8 : 4, uint32_t, u16, u32);
+ DO_VEC_WIDENING_MUL (full ? 8 : 4, uint16_t, u16, u16);
return;
case 2:
- DO_VEC_WIDENING_MUL (full ? 4 : 2, uint64_t, u32, u64);
+ DO_VEC_WIDENING_MUL (full ? 4 : 2, uint32_t, u32, u32);
return;
case 3:
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x25);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
static float
fmaxnm (float a, float b)
{
- if (fpclassify (a) == FP_NORMAL)
+ if (! isnan (a))
{
- if (fpclassify (b) == FP_NORMAL)
+ if (! isnan (b))
return a > b ? a : b;
return a;
}
- else if (fpclassify (b) == FP_NORMAL)
+ else if (! isnan (b))
return b;
return a;
}
static float
fminnm (float a, float b)
{
+ /* IEEE 754 minNum semantics (AArch64 FMINNM): a single NaN operand is
+ ignored in favour of the numeric operand; a NaN (a) is returned only
+ when both operands are NaNs.  As with fmaxnm, the old fpclassify
+ test mis-handled zeros, infinities and subnormals.  */
- if (fpclassify (a) == FP_NORMAL)
+ if (! isnan (a))
{
- if (fpclassify (b) == FP_NORMAL)
+ if (! isnan (b))
return a < b ? a : b;
return a;
}
- else if (fpclassify (b) == FP_NORMAL)
+ else if (! isnan (b))
return b;
return a;
}
static double
dmaxnm (double a, double b)
{
+ /* Double-precision counterpart of fmaxnm: IEEE 754 maxNum, where a
+ lone NaN operand loses to the numeric operand.  The fpclassify test
+ it replaces also rejected zeros, infinities and subnormals.  */
- if (fpclassify (a) == FP_NORMAL)
+ if (! isnan (a))
{
- if (fpclassify (b) == FP_NORMAL)
+ if (! isnan (b))
return a > b ? a : b;
return a;
}
- else if (fpclassify (b) == FP_NORMAL)
+ else if (! isnan (b))
return b;
return a;
}
static double
dminnm (double a, double b)
{
+ /* Double-precision counterpart of fminnm: IEEE 754 minNum, where a
+ lone NaN operand loses to the numeric operand.  The fpclassify test
+ it replaces also rejected zeros, infinities and subnormals.  */
- if (fpclassify (a) == FP_NORMAL)
+ if (! isnan (a))
{
- if (fpclassify (b) == FP_NORMAL)
+ if (! isnan (b))
return a < b ? a : b;
return a;
}
- else if (fpclassify (b) == FP_NORMAL)
+ else if (! isnan (b))
return b;
return a;
}
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x31);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
double (* fn)(double, double) = INSTR (23, 23)
NYI_assert (29, 21, 0x071);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_u32 (cpu, vd, i,
aarch64_get_vec_u32 (cpu, vn, i)
NYI_assert (29, 21, 0x173);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i,
( aarch64_get_vec_u8 (cpu, vd, i)
NYI_assert (29, 21, 0x171);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_u32 (cpu, vd, i,
aarch64_get_vec_u32 (cpu, vn, i)
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (test_false)
{
for (i = 0; i < (full ? 16 : 8); i++)
NYI_assert (29, 21, 0x077);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i,
aarch64_get_vec_u8 (cpu, vn, i)
NYI_assert (29, 21, 0x075);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i,
aarch64_get_vec_u8 (cpu, vn, i)
NYI_assert (29, 21, 0x073);
NYI_assert (15, 10, 0x07);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i,
aarch64_get_vec_u8 (cpu, vn, i)
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0x84A);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
- if (bias)
- for (i = 0; i < 8; i++)
- aarch64_set_vec_u8 (cpu, vd, i + 8,
- aarch64_get_vec_u16 (cpu, vs, i) >> 8);
- else
- for (i = 0; i < 8; i++)
- aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vs, i));
+ for (i = 0; i < 8; i++)
+ aarch64_set_vec_u8 (cpu, vd, i + (bias * 8),
+ aarch64_get_vec_u16 (cpu, vs, i));
return;
case 1:
- if (bias)
- for (i = 0; i < 4; i++)
- aarch64_set_vec_u16 (cpu, vd, i + 4,
- aarch64_get_vec_u32 (cpu, vs, i) >> 16);
- else
- for (i = 0; i < 4; i++)
- aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vs, i));
+ for (i = 0; i < 4; i++)
+ aarch64_set_vec_u16 (cpu, vd, i + (bias * 4),
+ aarch64_get_vec_u32 (cpu, vs, i));
return;
case 2:
- if (bias)
- for (i = 0; i < 2; i++)
- aarch64_set_vec_u32 (cpu, vd, i + 4,
- aarch64_get_vec_u64 (cpu, vs, i) >> 32);
- else
- for (i = 0; i < 2; i++)
- aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vs, i));
+ for (i = 0; i < 2; i++)
+ aarch64_set_vec_u32 (cpu, vd, i + (bias * 2),
+ aarch64_get_vec_u64 (cpu, vs, i));
return;
}
}
NYI_assert (20, 17, 8);
NYI_assert (15, 10, 0x2A);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch ((INSTR (29, 29) << 1) | INSTR (16, 16))
{
case 0: /* SMAXV. */
NYI_assert (22, 14, 0x0C3);
NYI_assert (11, 10, 2);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (23, 23))
{
switch (INSTR (13, 12))
NYI_assert (15, 14, 3);
NYI_assert (11, 10, 1);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
double (* func)(double, double);
NYI_assert (29, 23, 0x1C);
NYI_assert (21, 10, 0x876);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (size)
{
if (! full)
{ \
if (vm != 0) \
HALT_NYI; \
- if (INSTR (22, 22)) \
+ if (INSTR (22, 22)) \
{ \
if (! full) \
HALT_NYI; \
#define VEC_FCMP(CMP) \
do \
{ \
- if (INSTR (22, 22)) \
+ if (INSTR (22, 22)) \
{ \
if (! full) \
HALT_NYI; \
NYI_assert (28, 24, 0x0E);
NYI_assert (21, 21, 1);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if ((INSTR (11, 11)
&& INSTR (14, 14))
|| ((INSTR (11, 11) == 0
/* FIXME: What is a signed shift left in this context ?. */
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (29, 24, 0x2E);
NYI_assert (15, 10, 0x11);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x33);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x19);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (29, 29))
{
switch (INSTR (23, 22))
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x1B);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (29, 29))
{
switch (INSTR (23, 22))
if (size == 3)
HALT_UNALLOC;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (30, 29))
{
case 2: /* SSUBL2. */
copy_vn = cpu->fr[vn];
copy_vm = cpu->fr[vm];
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (size)
{
case 0:
NYI_assert (29, 21, 0x070);
NYI_assert (15, 10, 0x0F);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (16, 16))
{
/* Byte transfer. */
NYI_assert (29, 23, 0x1D);
NYI_assert (21, 10, 0x83E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (29, 23, 0x1D);
NYI_assert (21, 10, 0x86E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0x802);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (size)
{
case 0:
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0x806);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (size)
{
case 0:
NYI_assert (28, 22, 0x3C);
NYI_assert (15, 10, 0x29);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (30, 29))
{
case 2: /* SXTL2, SSHLL2. */
NYI_assert (29, 23, 0x1E);
NYI_assert (15, 10, 0x15);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
shift = INSTR (21, 16);
NYI_assert (28, 23, 0x1E);
NYI_assert (15, 10, 0x01);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
shift = 128 - shift;
NYI_assert (15, 12, 0x8);
NYI_assert (10, 10, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (size)
{
case 1:
}
static void
+do_FMLA_by_element (sim_cpu *cpu)
+{
+ /* FMLA (vector, by element): for each active lane e,
+ Vd[e] += Vn[e] * Vm[index].  The multiplier element index is
+ H:L for single-precision lanes and H for double-precision lanes.  */
+ /* instr[31] = 0
+ instr[30] = half/full
+ instr[29,23] = 00 1111 1
+ instr[22] = size
+ instr[21] = L
+ instr[20,16] = m
+ instr[15,12] = 0001
+ instr[11] = H
+ instr[10] = 0
+ instr[9,5] = Vn
+ instr[4,0] = Vd */
+
+ unsigned full = INSTR (30, 30);
+ unsigned size = INSTR (22, 22);
+ unsigned L = INSTR (21, 21);
+ unsigned vm = INSTR (20, 16);
+ unsigned H = INSTR (11, 11);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned e;
+
+ NYI_assert (29, 23, 0x1F);
+ NYI_assert (15, 12, 0x1);
+ NYI_assert (10, 10, 0);
+
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (size)
+ {
+ double element1, element2;
+
+ /* Double-precision lanes are only encodable in the full (128-bit)
+ form with L == 0.  */
+ if (! full || L)
+ HALT_UNALLOC;
+
+ /* Fetch the multiplier element before any write, so vm == vd is
+ handled correctly.  */
+ element2 = aarch64_get_vec_double (cpu, vm, H);
+
+ for (e = 0; e < 2; e++)
+ {
+ element1 = aarch64_get_vec_double (cpu, vn, e);
+ element1 *= element2;
+ element1 += aarch64_get_vec_double (cpu, vd, e);
+ aarch64_set_vec_double (cpu, vd, e, element1);
+ }
+ }
+ else
+ {
+ float element1;
+ /* H:L selects one of the four single-precision elements of Vm.  */
+ float element2 = aarch64_get_vec_float (cpu, vm, (H << 1) | L);
+
+ for (e = 0; e < (full ? 4 : 2); e++)
+ {
+ element1 = aarch64_get_vec_float (cpu, vn, e);
+ element1 *= element2;
+ element1 += aarch64_get_vec_float (cpu, vd, e);
+ aarch64_set_vec_float (cpu, vd, e, element1);
+ }
+ }
+}
+
+static void
do_vec_op2 (sim_cpu *cpu)
{
/* instr[31] = 0
{
switch (INSTR (15, 10))
{
+ case 0x04:
+ case 0x06:
+ do_FMLA_by_element (cpu);
+ return;
+
case 0x20:
- case 0x22: do_vec_MUL_by_element (cpu); return;
- default: HALT_NYI;
+ case 0x22:
+ do_vec_MUL_by_element (cpu);
+ return;
+
+ default:
+ HALT_NYI;
}
}
else
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 10, 0x82E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (29, 23, 0x5B);
NYI_assert (21, 10, 0x87E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22) == 0)
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_float (cpu, vd, i,
NYI_assert (15, 12, 4);
NYI_assert (10, 10, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 1:
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x21);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x25);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i,
- (aarch64_get_vec_u8 (cpu, vn, i)
- * aarch64_get_vec_u8 (cpu, vm, i))
- - aarch64_get_vec_u8 (cpu, vd, i));
+ aarch64_get_vec_u8 (cpu, vd, i)
+ - (aarch64_get_vec_u8 (cpu, vn, i)
+ * aarch64_get_vec_u8 (cpu, vm, i)));
return;
case 1:
for (i = 0; i < (full ? 8 : 4); i++)
aarch64_set_vec_u16 (cpu, vd, i,
- (aarch64_get_vec_u16 (cpu, vn, i)
- * aarch64_get_vec_u16 (cpu, vm, i))
- - aarch64_get_vec_u16 (cpu, vd, i));
+ aarch64_get_vec_u16 (cpu, vd, i)
+ - (aarch64_get_vec_u16 (cpu, vn, i)
+ * aarch64_get_vec_u16 (cpu, vm, i)));
return;
case 2:
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_u32 (cpu, vd, i,
- (aarch64_get_vec_u32 (cpu, vn, i)
- * aarch64_get_vec_u32 (cpu, vm, i))
- - aarch64_get_vec_u32 (cpu, vd, i));
+ aarch64_get_vec_u32 (cpu, vd, i)
+ - (aarch64_get_vec_u32 (cpu, vn, i)
+ * aarch64_get_vec_u32 (cpu, vm, i)));
return;
default:
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x3F);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x37);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x35);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
/* Extract values before adding them incase vd == vn/vm. */
NYI_assert (29, 23, 0x5D);
NYI_assert (21, 10, 0x87E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (29, 23, 0x5D);
NYI_assert (21, 10, 0x83E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
if (! full)
NYI_assert (29, 10, 0xB8816);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i, ~ aarch64_get_vec_u8 (cpu, vn, i));
}
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 10, 0x812);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (23, 22))
{
case 0:
NYI_assert (15, 15, 0);
NYI_assert (10, 10, 1);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (16, 16))
{
/* Move a byte. */
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 10, 0x802);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (size)
{
case 0:
j = 0;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
for (i = src_index; i < (full ? 16 : 8); i++)
val.b[j ++] = aarch64_get_vec_u8 (cpu, vn, i);
for (i = 0; i < src_index; i++)
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa)
+ aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa)
+ aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa)
- aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa)
- aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa)
+ (- aarch64_get_FP_float (cpu, sn))
* aarch64_get_FP_float (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa)
+ (- aarch64_get_FP_double (cpu, sn))
* aarch64_get_FP_double (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa)
+ aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa)
+ aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm));
NYI_assert (11, 10, 0x1);
NYI_assert (4, 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (! testConditionCode (cpu, INSTR (15, 12)))
{
aarch64_set_CPSR (cpu, INSTR (3, 0));
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
+ aarch64_get_FP_float (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
+ aarch64_get_FP_double (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
/ aarch64_get_FP_float (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
/ aarch64_get_FP_double (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, - (aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm)));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, - (aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm)));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
- aarch64_get_FP_float (cpu, sm));
}
unsigned sn = INSTR ( 9, 5);
unsigned sd = INSTR ( 4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
- aarch64_get_FP_double (cpu, sm));
}
NYI_assert (31, 23, 0x03C);
NYI_assert (15, 10, 0x1E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
aarch64_set_FP_double (cpu, sd,
dminnm (aarch64_get_FP_double (cpu, sn),
NYI_assert (31, 23, 0x03C);
NYI_assert (15, 10, 0x1A);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
aarch64_set_FP_double (cpu, sd,
dmaxnm (aarch64_get_FP_double (cpu, sn),
NYI_assert (31, 23, 0x03C);
NYI_assert (11, 10, 0x3);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, sd, set ? sn : sm);
+ aarch64_set_FP_double (cpu, sd, (set ? aarch64_get_FP_double (cpu, sn)
+ : aarch64_get_FP_double (cpu, sm)));
else
- aarch64_set_FP_float (cpu, sd, set ? sn : sm);
+ aarch64_set_FP_float (cpu, sd, (set ? aarch64_get_FP_float (cpu, sn)
+ : aarch64_get_FP_float (cpu, sm)));
}
/* Store 32 bit unscaled signed 9 bit. */
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
- aarch64_set_mem_u32 (cpu, aarch64_get_reg_u64 (cpu, st, 1) + offset,
- aarch64_get_vec_u32 (cpu, rn, 0));
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ aarch64_set_mem_u32 (cpu, aarch64_get_reg_u64 (cpu, rn, 1) + offset,
+ aarch64_get_vec_u32 (cpu, st, 0));
}
/* Store 64 bit unscaled signed 9 bit. */
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
- aarch64_set_mem_u64 (cpu, aarch64_get_reg_u64 (cpu, st, 1) + offset,
- aarch64_get_vec_u64 (cpu, rn, 0));
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ aarch64_set_mem_u64 (cpu, aarch64_get_reg_u64 (cpu, rn, 1) + offset,
+ aarch64_get_vec_u64 (cpu, st, 0));
}
/* Store 128 bit unscaled signed 9 bit. */
unsigned int st = INSTR (4, 0);
FRegister a;
- aarch64_get_FP_long_double (cpu, rn, & a);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ aarch64_get_FP_long_double (cpu, st, & a);
aarch64_set_mem_long_double (cpu,
- aarch64_get_reg_u64 (cpu, st, 1)
+ aarch64_get_reg_u64 (cpu, rn, 1)
+ offset, a);
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, st, aarch64_get_FP_float (cpu, rn));
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, st, aarch64_get_FP_double (cpu, rn));
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_reg_u32 (cpu, rn, NO_SP));
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_reg_u64 (cpu, rn, NO_SP));
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u32 (cpu, rn, 0));
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u64 (cpu, rn, 0));
}
uint32_t imm = INSTR (20, 13);
float f = fp_immediate_for_encoding_32 (imm);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, f);
}
uint32_t imm = INSTR (20, 13);
double d = fp_immediate_for_encoding_64 (imm);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, d);
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
}
unsigned int rn = INSTR (9, 5);
unsigned int st = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_mem_u64
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
}
FRegister a;
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_get_mem_long_double (cpu, addr, & a);
aarch64_set_FP_long_double (cpu, st, a);
}
unsigned sd = INSTR (4, 0);
float value = aarch64_get_FP_float (cpu, sn);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, fabsf (value));
}
unsigned sd = INSTR (4, 0);
double value = aarch64_get_FP_double (cpu, sn);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, fabs (value));
}
unsigned sn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sn));
}
unsigned sn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sn));
}
unsigned sn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
- aarch64_set_FP_float (cpu, sd, sqrt (aarch64_get_FP_float (cpu, sn)));
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ aarch64_set_FP_float (cpu, sd, sqrtf (aarch64_get_FP_float (cpu, sn)));
}
/* Double square root. */
unsigned sn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd,
sqrt (aarch64_get_FP_double (cpu, sn)));
}
unsigned sn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, sd, (float) aarch64_get_FP_double (cpu, sn));
}
unsigned sn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, sd, (double) aarch64_get_FP_float (cpu, sn));
}
/* FIXME: Add support for rmode == 6 exactness check. */
rmode = uimm (aarch64_get_FPSR (cpu), 23, 22);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
double val = aarch64_get_FP_double (cpu, rs);
NYI_assert (31, 10, 0x7B890);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float (cpu, rd, (float) aarch64_get_FP_half (cpu, rn));
}
NYI_assert (31, 10, 0x7B8B0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double (cpu, rd, (double) aarch64_get_FP_half (cpu, rn));
}
NYI_assert (31, 10, 0x788F0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_half (cpu, rd, aarch64_get_FP_float (cpu, rn));
}
NYI_assert (31, 10, 0x798F0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_half (cpu, rd, (float) aarch64_get_FP_double (cpu, rn));
}
unsigned rn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float
(cpu, sd, (float) aarch64_get_reg_s32 (cpu, rn, NO_SP));
}
unsigned rn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_float
(cpu, sd, (float) aarch64_get_reg_s64 (cpu, rn, NO_SP));
}
unsigned rn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double
(cpu, sd, (double) aarch64_get_reg_s32 (cpu, rn, NO_SP));
}
unsigned rn = INSTR (9, 5);
unsigned sd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_FP_double
(cpu, sd, (double) aarch64_get_reg_s64 (cpu, rn, NO_SP));
}
static const double DOUBLE_LONG_MAX = (double) LONG_MAX;
static const double DOUBLE_LONG_MIN = (double) LONG_MIN;
+#define UINT_MIN 0
+#define ULONG_MIN 0
+static const float FLOAT_UINT_MAX = (float) UINT_MAX;
+static const float FLOAT_UINT_MIN = (float) UINT_MIN;
+static const double DOUBLE_UINT_MAX = (double) UINT_MAX;
+static const double DOUBLE_UINT_MIN = (double) UINT_MIN;
+static const float FLOAT_ULONG_MAX = (float) ULONG_MAX;
+static const float FLOAT_ULONG_MIN = (float) ULONG_MIN;
+static const double DOUBLE_ULONG_MAX = (double) ULONG_MAX;
+static const double DOUBLE_ULONG_MIN = (double) ULONG_MIN;
+
/* Check for FP exception conditions:
NaN raises IO
Infinity raises IO
RAISE_EXCEPTIONS (f, value, FLOAT, INT);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* Avoid sign extension to 64 bit. */
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
}
RAISE_EXCEPTIONS (f, value, FLOAT, LONG);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
}
RAISE_EXCEPTIONS (d, value, DOUBLE, INT);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* Avoid sign extension to 64 bit. */
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
}
RAISE_EXCEPTIONS (d, value, DOUBLE, LONG);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
}
/* Convert to fixed point. */
HALT_NYI;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (31, 31))
{
/* Convert to unsigned 64-bit integer. */
/* Do not raise an exception if we have reached ULONG_MAX. */
if (value != (1UL << 63))
- RAISE_EXCEPTIONS (d, value, DOUBLE, LONG);
+ RAISE_EXCEPTIONS (d, value, DOUBLE, ULONG);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
}
/* Do not raise an exception if we have reached ULONG_MAX. */
if (value != (1UL << 63))
- RAISE_EXCEPTIONS (f, value, FLOAT, LONG);
+ RAISE_EXCEPTIONS (f, value, FLOAT, ULONG);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
}
value = (uint32_t) d;
/* Do not raise an exception if we have reached UINT_MAX. */
if (value != (1UL << 31))
- RAISE_EXCEPTIONS (d, value, DOUBLE, INT);
+ RAISE_EXCEPTIONS (d, value, DOUBLE, UINT);
}
else
{
value = (uint32_t) f;
/* Do not raise an exception if we have reached UINT_MAX. */
if (value != (1UL << 31))
- RAISE_EXCEPTIONS (f, value, FLOAT, INT);
+ RAISE_EXCEPTIONS (f, value, FLOAT, UINT);
}
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
HALT_NYI;
/* FIXME: Add exception raising. */
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (31, 31))
{
uint64_t value = aarch64_get_reg_u64 (cpu, rs, NO_SP);
if (INSTR (15, 10) != 0)
HALT_UNALLOC;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (16, 16))
aarch64_set_vec_u64 (cpu, rd, 1, aarch64_get_reg_u64 (cpu, rn, NO_SP));
else
{
uint32_t flags;
+ /* FIXME: Add exception raising. */
if (isnan (fvalue1) || isnan (fvalue2))
flags = C|V;
+ else if (isinf (fvalue1) && isinf (fvalue2))
+ {
+ /* Subtracting two infinities may give a NaN, so instead compare the
+ signs directly via isinf.  NOTE(review): this relies on the GNU
+ behaviour of isinf returning +1 for +Inf and -1 for -Inf; ISO C
+ only guarantees a non-zero result for an infinity — confirm for
+ non-glibc hosts.  */
+ int result = isinf (fvalue1) - isinf (fvalue2);
+
+ if (result == 0)
+ flags = Z|C;
+ else if (result < 0)
+ flags = N;
+ else /* (result > 0). */
+ flags = C;
+ }
else
{
float result = fvalue1 - fvalue2;
float fvalue1 = aarch64_get_FP_float (cpu, sn);
float fvalue2 = aarch64_get_FP_float (cpu, sm);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_float_compare (cpu, fvalue1, fvalue2);
}
unsigned sn = INSTR ( 9, 5);
float fvalue1 = aarch64_get_FP_float (cpu, sn);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_float_compare (cpu, fvalue1, 0.0f);
}
float fvalue1 = aarch64_get_FP_float (cpu, sn);
float fvalue2 = aarch64_get_FP_float (cpu, sm);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_float_compare (cpu, fvalue1, fvalue2);
}
unsigned sn = INSTR ( 9, 5);
float fvalue1 = aarch64_get_FP_float (cpu, sn);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_float_compare (cpu, fvalue1, 0.0f);
}
{
uint32_t flags;
+ /* FIXME: Add exception raising. */
if (isnan (dval1) || isnan (dval2))
flags = C|V;
+ else if (isinf (dval1) && isinf (dval2))
+ {
+ /* Subtracting two infinities may give a NaN, so instead compare the
+ signs directly via isinf.  NOTE(review): this relies on the GNU
+ behaviour of isinf returning +1 for +Inf and -1 for -Inf; ISO C
+ only guarantees a non-zero result for an infinity — confirm for
+ non-glibc hosts.  */
+ int result = isinf (dval1) - isinf (dval2);
+
+ if (result == 0)
+ flags = Z|C;
+ else if (result < 0)
+ flags = N;
+ else /* (result > 0). */
+ flags = C;
+ }
else
{
double result = dval1 - dval2;
double dvalue1 = aarch64_get_FP_double (cpu, sn);
double dvalue2 = aarch64_get_FP_double (cpu, sm);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_double_compare (cpu, dvalue1, dvalue2);
}
unsigned sn = INSTR ( 9, 5);
double dvalue1 = aarch64_get_FP_double (cpu, sn);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_double_compare (cpu, dvalue1, 0.0);
}
double dvalue1 = aarch64_get_FP_double (cpu, sn);
double dvalue2 = aarch64_get_FP_double (cpu, sm);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_double_compare (cpu, dvalue1, dvalue2);
}
unsigned sn = INSTR ( 9, 5);
double dvalue1 = aarch64_get_FP_double (cpu, sn);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
set_flags_for_double_compare (cpu, dvalue1, 0.0);
}
NYI_assert (31, 23, 0x0FC);
NYI_assert (21, 10, 0xC36);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
double val1 = aarch64_get_vec_double (cpu, Fn, 0);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x35);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
aarch64_set_FP_double (cpu, rd,
fabs (aarch64_get_FP_double (cpu, rn)
NYI_assert (31, 21, 0x2F7);
NYI_assert (15, 10, 0x0D);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, rd, 0,
aarch64_get_vec_u64 (cpu, rn, 0) >
aarch64_get_vec_u64 (cpu, rm, 0) ? -1L : 0L);
NYI_assert (31, 23, 0x0FE);
NYI_assert (15, 10, 0x01);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, rd, 0,
aarch64_get_vec_u64 (cpu, rn, 0) >> amount);
}
NYI_assert (31, 21, 0x2F7);
NYI_assert (15, 10, 0x11);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (shift >= 0)
aarch64_set_vec_s64 (cpu, rd, 0,
aarch64_get_vec_s64 (cpu, rn, 0) << shift);
if (INSTR (22, 22) == 0)
HALT_UNALLOC;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
switch (INSTR (15, 10))
{
case 0x01: /* SSHR */
NYI_assert (15, 12, 0xE);
NYI_assert (10, 10, 1);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
double val1 = aarch64_get_FP_double (cpu, rn);
NYI_assert (31, 21, 0x2F0);
NYI_assert (15, 10, 0x01);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (16, 16))
{
/* 8-bit. */
NYI_assert (31, 10, 0x1FB82E);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_vec_u64 (cpu, rd, 0, - aarch64_get_vec_u64 (cpu, rn, 0));
}
NYI_assert (31, 21, 0x3F7);
NYI_assert (15, 10, 0x11);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (shift >= 0)
aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_vec_u64 (cpu, rn, 0) << shift);
else
Fm = INSTR (9, 5);
Fn = INSTR (20, 16);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
val1 = aarch64_get_FP_double (cpu, Fm);
val2 = aarch64_get_FP_double (cpu, Fn);
NYI_assert (31, 23, 0x0FC);
NYI_assert (21, 10, 0x876);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (INSTR (22, 22))
{
uint64_t val = aarch64_get_vec_u64 (cpu, rn, 0);
address &= ~0xfff;
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, address + offset);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, NO_SP) & bimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, NO_SP) & bimm);
}
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = bimm;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
set_flags_for_binop32 (cpu, value1 & value2);
}
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = bimm;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
set_flags_for_binop64 (cpu, value1 & value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, NO_SP) ^ bimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, NO_SP) ^ bimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, NO_SP) | bimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, NO_SP) | bimm);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
& shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
& shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
set_flags_for_binop32 (cpu, value1 & value2);
}
uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
set_flags_for_binop64 (cpu, value1 & value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
& ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
& ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
uint32_t value2 = ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
set_flags_for_binop32 (cpu, value1 & value2);
}
uint64_t value2 = ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
shift, count);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
set_flags_for_binop64 (cpu, value1 & value2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
^ ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
^ ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
^ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
^ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
| shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
| shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
| ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
| ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, val << (pos * 16));
}
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, ((uint64_t) val) << (pos * 16));
}
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, ((val << (pos * 16)) ^ 0xffffffffU));
}
{
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, ((((uint64_t) val) << (pos * 16))
^ 0xffffffffffffffffULL));
uint32_t value = val << (pos * 16);
uint32_t mask = ~(0xffffU << (pos * 16));
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (value | (current & mask)));
}
uint64_t value = (uint64_t) val << (pos * 16);
uint64_t mask = ~(0xffffULL << (pos * 16));
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (value | (current & mask)));
}
value >>= r - (s + 1);
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
}
value >>= r - (s + 1);
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
}
value >>= r - (s + 1);
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
}
value >>= r - (s + 1);
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
rd = INSTR (4, 0);
aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
}
value2 &= ~mask;
value2 |= value;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64
(cpu, rd, NO_SP, (aarch64_get_reg_u32 (cpu, rd, NO_SP) & ~mask) | value);
}
mask >>= r - (s + 1);
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, (aarch64_get_reg_u64 (cpu, rd, NO_SP) & ~mask) | value);
val2 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
val2 <<= (32 - imms);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP, val1 | val2);
}
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, ra, NO_SP)
+ aarch64_get_reg_u32 (cpu, rn, NO_SP)
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, ra, NO_SP)
- + aarch64_get_reg_u64 (cpu, rn, NO_SP)
- * aarch64_get_reg_u64 (cpu, rm, NO_SP));
+ + (aarch64_get_reg_u64 (cpu, rn, NO_SP)
+ * aarch64_get_reg_u64 (cpu, rm, NO_SP)));
}
/* 32 bit multiply and sub. */
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, ra, NO_SP)
- aarch64_get_reg_u32 (cpu, rn, NO_SP)
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, ra, NO_SP)
- aarch64_get_reg_u64 (cpu, rn, NO_SP)
uint64_t value2_hi = highWordToU64 (value2);
/* Cross-multiply and collect results. */
-
uint64_t xproductlo = value1_lo * value2_lo;
uint64_t xproductmid1 = value1_lo * value2_hi;
uint64_t xproductmid2 = value1_hi * value2_lo;
uvalue2 = value2;
}
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
uresult = mul64hi (uvalue1, uvalue2);
result = uresult;
result *= signum;
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* N.B. we need to multiply the signed 32 bit values in rn, rm to
obtain a 64 bit product. */
aarch64_set_reg_u64
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* N.B. we need to multiply the signed 32 bit values in rn, rm to
obtain a 64 bit product. */
aarch64_set_reg_u64
if (ra != R31)
HALT_UNALLOC;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
mul64hi (aarch64_get_reg_u64 (cpu, rn, NO_SP),
aarch64_get_reg_u64 (cpu, rm, NO_SP)));
static void
bl (sim_cpu *cpu, int32_t offset)
{
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_save_LR (cpu);
aarch64_set_next_PC_by_offset (cpu, offset);
" %*scall %" PRIx64 " [%s]"
" [args: %" PRIx64 " %" PRIx64 " %" PRIx64 "]",
stack_depth, " ", aarch64_get_next_PC (cpu),
- aarch64_get_func (aarch64_get_next_PC (cpu)),
+ aarch64_get_func (CPU_STATE (cpu),
+ aarch64_get_next_PC (cpu)),
aarch64_get_reg_u64 (cpu, 0, NO_SP),
aarch64_get_reg_u64 (cpu, 1, NO_SP),
aarch64_get_reg_u64 (cpu, 2, NO_SP)
br (sim_cpu *cpu)
{
unsigned rn = INSTR (9, 5);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP));
}
{
unsigned rn = INSTR (9, 5);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
/* The pseudo code in the spec says we update LR before fetching
   the value from rn.  */
aarch64_save_LR (cpu);
" %*scall %" PRIx64 " [%s]"
" [args: %" PRIx64 " %" PRIx64 " %" PRIx64 "]",
stack_depth, " ", aarch64_get_next_PC (cpu),
- aarch64_get_func (aarch64_get_next_PC (cpu)),
+ aarch64_get_func (CPU_STATE (cpu),
+ aarch64_get_next_PC (cpu)),
aarch64_get_reg_u64 (cpu, 0, NO_SP),
aarch64_get_reg_u64 (cpu, 1, NO_SP),
aarch64_get_reg_u64 (cpu, 2, NO_SP)
unsigned rn = INSTR (9, 5);
aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP));
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (TRACE_BRANCH_P (cpu))
{
TRACE_BRANCH (cpu,
static void
nop (sim_cpu *cpu)
{
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
}
/* Data synchronization barrier. */
static void
dsb (sim_cpu *cpu)
{
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
}
/* Data memory barrier. */
static void
dmb (sim_cpu *cpu)
{
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
}
/* Instruction synchronization barrier. */
static void
isb (sim_cpu *cpu)
{
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
}
static void
static void
bcc (sim_cpu *cpu, int32_t offset, CondCode cc)
{
- /* the test returns TRUE if CC is met. */
+ /* The test returns TRUE if CC is met. */
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (testConditionCode (cpu, cc))
aarch64_set_next_PC_by_offset (cpu, offset);
}
{
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (aarch64_get_reg_u32 (cpu, rt, NO_SP) != 0)
aarch64_set_next_PC_by_offset (cpu, offset);
}
{
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (aarch64_get_reg_u64 (cpu, rt, NO_SP) != 0)
aarch64_set_next_PC_by_offset (cpu, offset);
}
{
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (aarch64_get_reg_u32 (cpu, rt, NO_SP) == 0)
aarch64_set_next_PC_by_offset (cpu, offset);
}
{
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (aarch64_get_reg_u64 (cpu, rt, NO_SP) == 0)
aarch64_set_next_PC_by_offset (cpu, offset);
}
{
unsigned rt = INSTR (4, 0);
- if (aarch64_get_reg_u64 (cpu, rt, NO_SP) & (1 << pos))
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (aarch64_get_reg_u64 (cpu, rt, NO_SP) & (((uint64_t) 1) << pos))
aarch64_set_next_PC_by_offset (cpu, offset);
}
-/* branch on register bit test zero -- one size fits all. */
+/* Branch on register bit test zero -- one size fits all. */
static void
tbz (sim_cpu *cpu, uint32_t pos, int32_t offset)
{
unsigned rt = INSTR (4, 0);
- if (!(aarch64_get_reg_u64 (cpu, rt, NO_SP) & (1 << pos)))
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (!(aarch64_get_reg_u64 (cpu, rt, NO_SP) & (((uint64_t) 1) << pos)))
aarch64_set_next_PC_by_offset (cpu, offset);
}
instr[18,5] = simm14 : signed offset counted in words
instr[4,0] = uimm5 */
- uint32_t pos = ((INSTR (31, 31) << 4) | INSTR (23, 19));
+ uint32_t pos = ((INSTR (31, 31) << 5) | INSTR (23, 19));
int32_t offset = simm32 (aarch64_get_instr (cpu), 18, 5) << 2;
NYI_assert (30, 25, 0x1b);
{
uint64_t result = 0;
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
if (val != 0xf000)
{
TRACE_SYSCALL (cpu, " HLT [0x%x]", val);
unsigned sys_op2 = INSTR (7, 5);
unsigned rt = INSTR (4, 0);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
aarch64_set_reg_u64 (cpu, rt, NO_SP,
system_get (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2));
}
NYI_assert (31, 20, 0xD51);
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
system_set (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2,
aarch64_get_reg_u64 (cpu, rt, NO_SP));
}
return FALSE;
aarch64_set_next_PC (cpu, pc + 4);
- aarch64_get_instr (cpu) = aarch64_get_mem_u32 (cpu, pc);
+
+ /* Code is always little-endian. */
+ sim_core_read_buffer (CPU_STATE (cpu), cpu, read_map,
+ & aarch64_get_instr (cpu), pc, 4);
+ aarch64_get_instr (cpu) = endian_le2h_4 (aarch64_get_instr (cpu));
TRACE_INSN (cpu, " pc = %" PRIx64 " instr = %08x", pc,
aarch64_get_instr (cpu));
sim_cpu *cpu = STATE_CPU (sd, 0);
while (aarch64_step (cpu))
- aarch64_update_PC (cpu);
+ {
+ aarch64_update_PC (cpu);
+
+ if (sim_events_tick (sd))
+ sim_events_process (sd);
+ }
- sim_engine_halt (sd, NULL, NULL, aarch64_get_PC (cpu),
- sim_exited, aarch64_get_reg_s32 (cpu, R0, SP_OK));
+ sim_engine_halt (sd, cpu, NULL, aarch64_get_PC (cpu),
+ sim_exited, aarch64_get_reg_s32 (cpu, R0, NO_SP));
}
void