/* aarch64-asm.c -- AArch64 assembler support.
   Copyright 2012-2013 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include "aarch64-asm.h"
/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M; in such a case the fields should be passed in
   the order M, L, H.  */
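/* Illustrative note (editor's example, not part of the original source):
   a 3-bit lane index encoded in H:L:M is inserted with the least
   significant field named first, e.g.

     insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);

   where each named field consumes FIELD->width bits of VALUE, starting
   from VALUE's least significant end.  */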
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
  const aarch64_field *field;
  enum aarch64_field_kind kind;

  num = va_arg (va, uint32_t);

      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
/* Operand inserters.  */

/* Insert register number.  */
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (self->fields[0], code, info->reg.regno, 0);

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst)
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);  /* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL2 <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].  */
      switch (info->qualifier)
        case AARCH64_OPND_QLF_S_H:
          insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
        case AARCH64_OPND_QLF_S_S:
          insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
        case AARCH64_OPND_QLF_S_D:
          insert_field (FLD_H, code, info->reglane.index, 0);
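          /* Illustrative example (editor's note, not in the original source):
             with qualifier S_S and <index> = 3, the two index bits go into
             L = 1 and H = 1 via insert_fields above; with S_D only the
             single H bit carries the index.  */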
/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst)
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
      switch (info->reglist.num_regs)
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
  insert_field (FLD_opcode, code, value, 0);
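  /* Illustrative example (editor's note, not in the original source): for
     LD1 {V0.4S, V1.4S}, [X0] the list holds two registers and each
     structure has a single element, so the opcode field is set to 0xa per
     the mapping above.  */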
/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst)
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a "two
       consecutive" variant instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand, e.g. Vt in AdvSIMD load/store single element instructions.  */
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED)
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;   /* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0; /* opcode<2:1> */

  assert (info->reglist.has_index);

  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);
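  /* Illustrative example (editor's note, not in the original source): for
     LD1 {V1.S}[2], [X0] the qualifier is S_S and the index is 2, so
     Q:S:size = 2 << 2 = 0b1000, i.e. Q = 1, S = 0, size = 00.  */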
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst)
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);

  if (inst->opcode->iclass == asimdshf)
      /* immh == 0000 is reserved here: SEE AdvSIMD modified immediate.  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);

      assert (info->type == AARCH64_OPND_IMM_VLSR
              || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh         <shift>
       0000         SEE AdvSIMD modified immediate
       0001         (16-UInt(immh:immb))
       001x         (32-UInt(immh:immb))
       01xx         (64-UInt(immh:immb))
       1xxx         (128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh         <shift>
       0000         SEE AdvSIMD modified immediate
       0001         (UInt(immh:immb)-8)
       001x         (UInt(immh:immb)-16)
       01xx         (UInt(immh:immb)-32)
       1xxx         (UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
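  /* Worked example (editor's note, not in the original source): for a right
     shift such as SSHR <Vd>.4S, <Vn>.4S, #3 the element size is 32 bits, so
     immh:immb = 64 - 3 = 61 = 0b0111101, i.e. immh = 0111 and immb = 101,
     matching the "01xx (64-UInt(immh:immb))" row above.  */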
/* Insert fields for e.g. the immediate operands in
   BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED)
  /* Maximum of two fields to insert.  */
  assert (self->fields[2] == FLD_NIL);

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (self->fields[1] == FLD_NIL)
    insert_field (self->fields[0], code, imm, 0);
  else
    /* e.g. TBZ b5:b40.  */
    insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);
/* Insert immediate and its shift amount for e.g. the last operand in
   MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst)
  aarch64_ins_imm (self, info, code, inst);
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
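  /* Illustrative example (editor's note, not in the original source): for
     MOVZ <Wd>, #0x1234, LSL #16 the shift amount is 16, so the hw field is
     16 >> 4 = 1 and the imm16 field holds 0x1234.  */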
/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
   MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
      /* Either MOVI <Dd>, #<imm>
         or     MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
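  /* Illustrative example (editor's note, not in the original source): the
     64-bit immediate 0xff00ff00ff00ff00 expands from imm8 = 0b10101010, so
     aarch64_shrink_expanded_imm8 returns 0xaa, which is then split across
     the defgh and abc fields.  */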
  if (kind == AARCH64_MOD_NONE)

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
        gen_sub_field (FLD_cmode, 1, 2, &field);  /* per word */
        gen_sub_field (FLD_cmode, 1, 1, &field);  /* per halfword */
      /* AARCH64_MOD_MSL: shift ones.  */
      gen_sub_field (FLD_cmode, 0, 1, &field);    /* per word */
  insert_field_2 (&field, code, amount, 0);

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
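  /* Illustrative example (editor's note, not in the original source): for
     SCVTF <Dd>, <Wn>, #10 the fbits field is encoded as 64 - 10 = 54.  */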
/* Insert arithmetic immediate for e.g. the last operand in
   SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
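  /* Illustrative example (editor's note, not in the original source): for
     SUBS <Wd>, <Wn|WSP>, #16, LSL #12 the shift field (self->fields[0]) is
     set to 1 and the imm12 field holds 16.  */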
/* Insert logical/bitmask immediate for e.g. the last operand in
   ORR <Wd|WSP>, <Wn>, #<imm>.  */
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
  uint64_t imm = info->imm.value;
  int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;
  if (inst->opcode->op == OP_BIC)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                 self->fields[0]);
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst)
  aarch64_insn value = 0;

  assert (info->idx == 0);

  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
      switch (info->qualifier)
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
      insert_field (FLD_ldst_size, code, value, 0);
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);

/* Encode the address operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  enum aarch64_modifier_kind kind = info->shifter.kind;

  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;  /* Trick to enable the table-driven encoding.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}], the <amount>
       must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
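  /* Illustrative example (editor's note, not in the original source): for
     LDP <Xt1>, <Xt2>, [<Xn|SP>, #16] the element size is 8 bytes, so the
     scaled imm7 field holds 16 >> 3 = 2.  */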
  /* pre/post-index */
  if (info->addr.writeback)
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
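  /* Illustrative example (editor's note, not in the original source): for
     LDR <Xt>, [<Xn|SP>, #32] the element size is 8 bytes (shift == 3), so
     the unsigned-offset field holds 32 >> 3 = 4.  */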
/* Encode the address operand for e.g.
   LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_cond, code, info->cond->value, 0);

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst ATTRIBUTE_UNUSED)
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);
/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_CRm, code, info->barrier->value, 0);

/* Encode the prefetch operation option operand for e.g.
   PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rt, code, info->prfop->value, 0);

/* Encode the extended register operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED)
  enum aarch64_modifier_kind kind;

  insert_field (FLD_Rm, code, info->reg.regno, 0);
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
           ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

/* Encode the shifted register operand for e.g.
   SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
encode_asimd_fcvt (aarch64_inst *inst)
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */
encode_asisd_fcvtxn (aarch64_inst *inst)
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
encode_fcvt (aarch64_inst *inst)
  const aarch64_field field = {15, 2};

  switch (inst->operands[0].qualifier)
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
  insert_field_2 (&field, &inst->value, val, 0);
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */
do_misc_encoding (aarch64_inst *inst)
  switch (inst->opcode->op)
      encode_asimd_fcvt (inst);
      encode_asisd_fcvtxn (inst);
/* Encode the 'size' and 'Q' fields for e.g. SHADD.  */
encode_sizeq (aarch64_inst *inst)
  enum aarch64_field_kind kind;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
/* Opcodes that have fields shared by multiple operands are usually marked
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The
   chosen operand is not arbitrary: it has to be one that carries enough
   information for the encoding.  */
do_special_encoding (struct aarch64_inst *inst)
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
  /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
  if (inst->opcode->flags & F_SF)
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
              ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
      insert_field (FLD_type, &inst->value, value, 0);
  if (inst->opcode->flags & F_SSIZE)
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
  if (inst->opcode->flags & F_T)
      int num;  /* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
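      /* Illustrative example (editor's note, not in the original source):
         for a .2D arrangement the Q bit is set and num works out to 3, so
         bit 3 of imm5 is set, i.e. imm5<3:0> = 0b1000.  */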
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      /* Otherwise use the result operand, which has to be an integer
         register.  */
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
  if (inst->opcode->flags & F_LDS_SIZE)
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);

  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);

/* Converters converting an alias opcode instruction to its real form.  */
/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
convert_ror_to_extr (aarch64_inst *inst)
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
convert_xtl_to_shll (aarch64_inst *inst)
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
/* LSR <Xd>, <Xn>, #<shift>
     is equivalent to:
   UBFM <Xd>, <Xn>, #<shift>, #63.  */
convert_sr_to_bfm (aarch64_inst *inst)
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
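/* Illustrative example (editor's note, not in the original source):
   LSR <Xd>, <Xn>, #4 becomes UBFM <Xd>, <Xn>, #4, #63, while the 32-bit
   form LSR <Wd>, <Wn>, #4 becomes UBFM <Wd>, <Wn>, #4, #31.  */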
/* Convert MOV to ORR.  */
convert_mov_to_orr (aarch64_inst *inst)
  /* MOV <Vd>.<T>, <Vn>.<T>
       is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
       is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
convert_bfx_to_bfm (aarch64_inst *inst)
  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
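/* Illustrative example (editor's note, not in the original source):
   SBFX <Xd>, <Xn>, #8, #16 becomes SBFM <Xd>, <Xn>, #8, #(8+16-1), i.e.
   SBFM <Xd>, <Xn>, #8, #23.  */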
/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
       is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
convert_bfi_to_bfm (aarch64_inst *inst)
  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
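/* Illustrative example (editor's note, not in the original source):
   SBFIZ <Xd>, <Xn>, #8, #16 becomes SBFM <Xd>, <Xn>, #((64-8)&0x3f), #(16-1),
   i.e. SBFM <Xd>, <Xn>, #56, #15.  */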
/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
       is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
convert_lsl_to_ubfm (aarch64_inst *inst)
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
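/* Illustrative example (editor's note, not in the original source):
   LSL <Xd>, <Xn>, #4 becomes UBFM <Xd>, <Xn>, #60, #59, since
   (64 - 4) & 0x3f == 60 and 63 - 4 == 59.  */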
/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
convert_to_csel (aarch64_inst *inst)
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
convert_cset_to_csinc (aarch64_inst *inst)
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
convert_mov_to_movewide (aarch64_inst *inst)
  uint32_t shift_amount;

  switch (inst->opcode->op)
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  value >>= shift_amount;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
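/* Illustrative example (editor's note, not in the original source):
   MOV <Xd>, #0x12340000 becomes MOVZ <Xd>, #0x1234, LSL #16: the wide
   constant check reports shift_amount == 16, and the value is shifted down
   to 0x1234 before being stored back as the imm16 operand.  */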
/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */
convert_mov_to_movebitmask (aarch64_inst *inst)
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
/* Some alias opcodes are assembled by being converted to their real form.  */
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

      convert_sr_to_bfm (inst);
      convert_lsl_to_ubfm (inst);
      convert_to_csel (inst);
      convert_cset_to_csinc (inst);
      convert_bfx_to_bfm (inst);
      convert_bfi_to_bfm (inst);
      convert_mov_to_orr (inst);
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      convert_ror_to_extr (inst);
      convert_xtl_to_shll (inst);

 convert_to_real_return:
  aarch64_replace_opcode (inst, real);
/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail)
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);
  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;
  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
      DEBUG_TRACE ("FAIL since operand constraint not met");

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)

          DEBUG_TRACE ("skip the incomplete operand %d", i);

      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
        aarch64_insert_operand (opnd, info, &inst->value, inst);

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;