/* aarch64-asm.c -- AArch64 assembler support.
- Copyright 2012 Free Software Foundation, Inc.
+ Copyright 2012, 2013 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of the GNU opcodes library.
const aarch64_opnd_info *info, aarch64_insn *code,
const aarch64_inst *inst)
{
- aarch64_insn value;
+ aarch64_insn value = 0;
/* Number of elements in each structure to be loaded/stored. */
unsigned num = get_opcode_dependent_value (inst->opcode);
const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
aarch64_field field = {0, 0};
- aarch64_insn QSsize; /* fields Q:S:size. */
- aarch64_insn opcodeh2; /* opcode<2:1> */
+ aarch64_insn QSsize = 0; /* fields Q:S:size. */
+ aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
assert (info->reglist.has_index);
MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
const char *
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
- aarch64_insn *code,
- const aarch64_inst *inst ATTRIBUTE_UNUSED)
+ aarch64_insn *code, const aarch64_inst *inst)
{
/* imm16 */
aarch64_ins_imm (self, info, code, inst);
imm = aarch64_shrink_expanded_imm8 (imm);
assert ((int)imm >= 0);
}
- assert (imm <= 255);
insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
if (kind == AARCH64_MOD_NONE)
{
/* AARCH64_MOD_LSL: shift zeros. */
int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
- assert (esize == 4 || esize == 2);
+ assert (esize == 4 || esize == 2 || esize == 1);
+ /* For 8-bit move immediate, the optional LSL #0 does not require
+ encoding. */
+ if (esize == 1)
+ return NULL;
amount >>= 3;
if (esize == 4)
gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
aarch64_insn *code, const aarch64_inst *inst)
{
- aarch64_insn value;
+ aarch64_insn value = 0;
assert (info->idx == 0);
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
const aarch64_opnd_info *info,
- aarch64_insn *code, const aarch64_inst *inst)
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
int imm;
do_special_encoding (struct aarch64_inst *inst)
{
int idx;
- aarch64_insn value;
+ aarch64_insn value = 0;
DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
copy_operand_info (inst, 2, 1);
}
+/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
+ is equivalent to:
+ USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
+static void
+convert_xtl_to_shll (aarch64_inst *inst)
+{
+ /* Synthesize the implicit third operand (the shift amount) so the
+ XTL alias encodes as its SHLL form: inherit the source register's
+ qualifier and use a zero shift count. */
+ inst->operands[2].qualifier = inst->operands[1].qualifier;
+ inst->operands[2].imm.value = 0;
+}
+
/* Convert
LSR <Xd>, <Xn>, #<shift>
to
}
inst->operands[1].type = AARCH64_OPND_HALF;
is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
- /* This should have been guaranteed by the constraint check. */
- assert (aarch64_wide_constant_p (value, is32, &shift_amount) == TRUE);
+ if (! aarch64_wide_constant_p (value, is32, &shift_amount))
+ /* The constraint check should have guaranteed this wouldn't happen. */
+ assert (0);
value >>= shift_amount;
value &= 0xffff;
inst->operands[1].imm.value = value;
case OP_ROR_IMM:
convert_ror_to_extr (inst);
break;
+ case OP_SXTL:
+ case OP_SXTL2:
+ case OP_UXTL:
+ case OP_UXTL2:
+ convert_xtl_to_shll (inst);
+ break;
default:
break;
}