1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order that the least significant
33 field for VALUE comes first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
/* Scatter VALUE across a variadic list of fields, least significant
   field first; MASK is forwarded to each insert_field call.  */
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
/* First variadic argument: the number of field arguments that follow.  */
47 num = va_arg (va, uint32_t);
/* For each field: insert the low bits of VALUE, then shift them out so the
   next (more significant) chunk lands in the next field.  */
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
/* Spread VALUE over every non-NIL field in SELF->fields.  Iterating from the
   last array slot backwards means the final field receives the least
   significant bits, matching the comment above.  */
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
67 enum aarch64_field_kind kind;
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
/* Consume the bits just written before moving to the next field.  */
74 value >>= fields[kind].width;
78 /* Operand inserters. */
80 /* Insert register number. */
/* Put INFO's register number into the operand's first (only) field.  */
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
86 insert_field (self->fields[0], code, info->reg.regno, 0);
90 /* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
/* Encode register number plus lane index; the index field layout depends on
   the instruction class and the element-size qualifier.  */
94 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
/* POS is 0 for byte, 1 for halfword, 2 for word, 3 for doubleword lanes.  */
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
/* imm5 carries index:1:0...0 -- the single set bit below the index selects
   the element size.  */
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 (the index field layout H:L:M depends on the element size). */
128 switch (info->qualifier)
130 case AARCH64_OPND_QLF_S_H:
/* Halfword lanes: 3-bit index split across M, L and H.  */
132 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H)
134 case AARCH64_OPND_QLF_S_S;
136 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
138 case AARCH64_OPND_QLF_S_D:
/* Doubleword lanes: single-bit index in H.  */
140 insert_field (FLD_H, code, info->reglane.index, 0);
149 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
151 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* First register of the list.  */
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
/* len field holds the register count minus one.  */
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
162 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
165 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
166 const aarch64_opnd_info *info, aarch64_insn *code,
167 const aarch64_inst *inst)
169 aarch64_insn value = 0;
170 /* Number of elements in each structure to be loaded/stored. */
171 unsigned num = get_opcode_dependent_value (inst->opcode);
/* Rt: first register of the list.  */
174 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
/* LD1/ST1 (NUM == 1): opcode value selected by register count, per the
   AdvSIMD load/store multiple structures encoding table.  */
179 switch (info->reglist.num_regs)
181 case 1: value = 0x7; break;
182 case 2: value = 0xa; break;
183 case 3: value = 0x6; break;
184 case 4: value = 0x2; break;
/* LDn/STn with n > 1: 0x3 for the 4-register form, 0x8 otherwise
   -- assumes the surrounding (elided) branch is the num != 1 case.  */
189 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
200 insert_field (FLD_opcode, code, value, 0);
205 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
208 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
/* Rt: first register of the list.  */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have an alternating variant, but has a "two consecutive"
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
230 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
233 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
234 const aarch64_opnd_info *info, aarch64_insn *code,
235 const aarch64_inst *inst ATTRIBUTE_UNUSED)
237 aarch64_field field = {0, 0};
238 aarch64_insn QSsize = 0; /* fields Q:S:size. */
239 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
241 assert (info->reglist.has_index);
/* Rt: first register of the list.  */
244 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
245 /* Encode the index, opcode<2:1> and size. */
246 switch (info->qualifier)
248 case AARCH64_OPND_QLF_S_B:
249 /* Index encoded in "Q:S:size". */
250 QSsize = info->reglist.index;
253 case AARCH64_OPND_QLF_S_H:
254 /* Index encoded in "Q:S:size<1>". */
255 QSsize = info->reglist.index << 1;
258 case AARCH64_OPND_QLF_S_S:
259 /* Index encoded in "Q:S". */
260 QSsize = info->reglist.index << 2;
263 case AARCH64_OPND_QLF_S_D:
264 /* Index encoded in "Q"; size<0> must be 1 for the D variant.  */
265 QSsize = info->reglist.index << 3 | 0x1;
271 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
/* opcode<2:1> lives in bits 1-2 of the asisdlso opcode field.  */
272 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
273 insert_field_2 (&field, code, opcodeh2, 0);
278 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
282 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
283 const aarch64_opnd_info *info,
284 aarch64_insn *code, const aarch64_inst *inst)
/* VAL is the standard encoding of the vector-arrangement qualifier; its low
   bit distinguishes the 128-bit form.  */
286 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
289 if (inst->opcode->iclass == asimdshf)
293 0000 x SEE AdvSIMD modified immediate
302 Q = (val & 0x1) ? 1 : 0;
303 insert_field (FLD_Q, code, Q, inst->opcode->mask);
307 assert (info->type == AARCH64_OPND_IMM_VLSR
308 || info->type == AARCH64_OPND_IMM_VLSL);
310 if (info->type == AARCH64_OPND_IMM_VLSR)
/* Right shifts: immh:immb encodes (element size * 2) - shift.  */
313 0000 SEE AdvSIMD modified immediate
314 0001 (16-UInt(immh:immb))
315 001x (32-UInt(immh:immb))
316 01xx (64-UInt(immh:immb))
317 1xxx (128-UInt(immh:immb)) */
318 imm = (16 << (unsigned)val) - info->imm.value;
/* Left shifts: immh:immb encodes element size + shift.  */
322 0000 SEE AdvSIMD modified immediate
323 0001 (UInt(immh:immb)-8)
324 001x (UInt(immh:immb)-16)
325 01xx (UInt(immh:immb)-32)
326 1xxx (UInt(immh:immb)-64) */
327 imm = info->imm.value + (8 << (unsigned)val);
328 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
333 /* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
/* Insert a plain immediate into all of SELF's fields.  */
336 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
342 imm = info->imm.value;
/* NOTE(review): operands flagged by operand_need_shift_by_two are presumably
   stored pre-scaled (imm >>= 2) before insertion -- the scaling statement is
   not visible in this listing; confirm against upstream.  */
343 if (operand_need_shift_by_two (self))
345 insert_all_fields (self, code, imm);
349 /* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
352 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
353 aarch64_insn *code, const aarch64_inst *inst)
/* imm16: delegate to the generic immediate inserter.  */
356 aarch64_ins_imm (self, info, code, inst);
/* hw: the LSL amount divided by 16 (shift is a multiple of 16).  */
358 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
362 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
365 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
366 const aarch64_opnd_info *info,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
370 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
371 uint64_t imm = info->imm.value;
372 enum aarch64_modifier_kind kind = info->shifter.kind;
373 int amount = info->shifter.amount;
374 aarch64_field field = {0, 0};
376 /* a:b:c:d:e:f:g:h */
377 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
379 /* Either MOVI <Dd>, #<imm>
380 or MOVI <Vd>.2D, #<imm>.
381 <imm> is a 64-bit immediate
382 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
383 encoded in "a:b:c:d:e:f:g:h". */
384 imm = aarch64_shrink_expanded_imm8 (imm);
/* aarch64_shrink_expanded_imm8 returns negative on failure; the constraint
   check should have ruled that out already.  */
385 assert ((int)imm >= 0);
387 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
/* No modifier: nothing more to encode.  */
389 if (kind == AARCH64_MOD_NONE)
392 /* shift amount partially in cmode */
393 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
394 if (kind == AARCH64_MOD_LSL)
396 /* AARCH64_MOD_LSL: shift zeros. */
397 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
398 assert (esize == 4 || esize == 2 || esize == 1);
399 /* For 8-bit move immediate, the optional LSL #0 does not require
/* The shift amount is encoded in a sub-field of cmode whose width depends
   on the element size.  */
405 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
407 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
411 /* AARCH64_MOD_MSL: shift ones. */
413 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
/* AMOUNT is assumed to have been reduced to the sub-field's unit by this
   point -- the scaling statements are not visible in this listing.  */
415 insert_field_2 (&field, code, amount, 0);
420 /* Insert fields for an 8-bit floating-point immediate. */
/* The 8-bit FP immediate is already in its encoded form; just place it.  */
422 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED)
426 insert_all_fields (self, code, info->imm.value);
430 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
433 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The scale field holds 64 - #<fbits>.  */
437 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
441 /* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
444 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* shift: 1 iff the immediate is LSL #12 shifted.  */
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
455 /* Insert logical/bitmask immediate for e.g. the last operand in
456 ORR <Wd|WSP>, <Wn>, #<imm>. */
458 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
459 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
462 uint64_t imm = info->imm.value;
463 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
/* NOTE(review): BIC is an alias encoded as AND of the inverted immediate;
   the inversion (imm = ~imm) is presumably on the elided line -- confirm.  */
465 if (inst->opcode->op == OP_BIC)
/* aarch64_logical_immediate_p yields the N:immr:imms encoding in VALUE.  */
467 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
468 /* The constraint check should have guaranteed this wouldn't happen. */
471 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
476 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
477 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
479 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
480 aarch64_insn *code, const aarch64_inst *inst)
482 aarch64_insn value = 0;
484 assert (info->idx == 0);
/* Rt: reuse the plain register-number inserter.  */
487 aarch64_ins_regno (self, info, code, inst);
488 if (inst->opcode->iclass == ldstpair_indexed
489 || inst->opcode->iclass == ldstnapair_offs
490 || inst->opcode->iclass == ldstpair_off
491 || inst->opcode->iclass == loadlit)
/* Pair/literal forms: 2-bit size from the S/D/Q qualifier.  */
494 switch (info->qualifier)
496 case AARCH64_OPND_QLF_S_S: value = 0; break;
497 case AARCH64_OPND_QLF_S_D: value = 1; break;
498 case AARCH64_OPND_QLF_S_Q: value = 2; break;
501 insert_field (FLD_ldst_size, code, value, 0);
/* Other forms: standard qualifier value split over opc1:size.  */
506 value = aarch64_get_qualifier_standard_value (info->qualifier);
507 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
513 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
515 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
516 const aarch64_opnd_info *info, aarch64_insn *code,
517 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Only the base register is encoded; the offset is implicitly #0.  */
520 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
524 /* Encode the address operand for e.g.
525 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
527 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
528 const aarch64_opnd_info *info, aarch64_insn *code,
529 const aarch64_inst *inst ATTRIBUTE_UNUSED)
532 enum aarch64_modifier_kind kind = info->shifter.kind;
/* Rn: base register.  */
535 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
/* Rm: index register.  */
537 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
/* option field: LSL is encoded as UXTX.  */
539 if (kind == AARCH64_MOD_LSL)
540 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
541 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
/* S: whether the index is scaled by the access size.  */
543 if (info->qualifier != AARCH64_OPND_QLF_S_B)
544 S = info->shifter.amount != 0;
546 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
550 Must be #0 if <extend> is explicitly LSL. */
551 S = info->shifter.operator_present && info->shifter.amount_present;
552 insert_field (FLD_S, code, S, 0);
557 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
559 aarch64_ins_addr_simm (const aarch64_operand *self,
560 const aarch64_opnd_info *info,
562 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Rn: base register.  */
567 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
568 /* simm (imm9 or imm7) */
569 imm = info->addr.offset.imm;
570 if (self->fields[0] == FLD_imm7)
571 /* scaled immediate in ld/st pair instructions.  */
572 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
573 insert_field (self->fields[0], code, imm, 0);
574 /* pre/post- index */
575 if (info->addr.writeback)
/* These instruction classes never take writeback addressing.  */
577 assert (inst->opcode->iclass != ldst_unscaled
578 && inst->opcode->iclass != ldstnapair_offs
579 && inst->opcode->iclass != ldstpair_off
580 && inst->opcode->iclass != ldst_unpriv);
581 assert (info->addr.preind != info->addr.postind);
/* fields[1] is the pre-index flag bit; set only for pre-indexed forms.  */
582 if (info->addr.preind)
583 insert_field (self->fields[1], code, 1, 0);
589 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
591 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
592 const aarch64_opnd_info *info,
594 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Scale factor: log2 of the element size implied by the qualifier.  */
596 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
/* Rn: base register.  */
599 insert_field (self->fields[0], code, info->addr.base_regno, 0);
/* uimm12: the byte offset stored scaled down to access-size units.
   (Fixed GNU-style spacing: "code, info" had a missing space.)  */
601 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
605 /* Encode the address operand for e.g.
606 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
608 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Rn: base register.  */
613 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
615 if (info->addr.offset.is_reg)
616 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
/* Immediate post-index: Rm is all-ones (0b11111).  */
618 insert_field (FLD_Rm, code, 0x1f, 0);
622 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
624 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* 4-bit condition code.  */
629 insert_field (FLD_cond, code, info->cond->value, 0);
633 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
635 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
636 const aarch64_opnd_info *info, aarch64_insn *code,
637 const aarch64_inst *inst ATTRIBUTE_UNUSED)
639 /* op0:op1:CRn:CRm:op2 -- least significant group (op2) listed first.  */
640 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
641 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
645 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
647 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
648 const aarch64_opnd_info *info, aarch64_insn *code,
649 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The pstatefield value is split over two fields (op1:op2).  */
652 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
657 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
659 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
660 const aarch64_opnd_info *info, aarch64_insn *code,
661 const aarch64_inst *inst ATTRIBUTE_UNUSED)
663 /* op1:CRn:CRm:op2 -- least significant group (op2) listed first.  */
664 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
665 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
669 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
672 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
673 const aarch64_opnd_info *info, aarch64_insn *code,
674 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The barrier option/immediate occupies CRm.  */
677 insert_field (FLD_CRm, code, info->barrier->value, 0);
681 /* Encode the prefetch operation option operand for e.g.
682 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
685 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
686 const aarch64_opnd_info *info, aarch64_insn *code,
687 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The prefetch operation is encoded in the Rt slot.  */
690 insert_field (FLD_Rt, code, info->prfop->value, 0);
694 /* Encode the hint number for instructions that alias HINT but take an
698 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
699 const aarch64_opnd_info *info, aarch64_insn *code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The hint number is split over CRm:op2, least significant group first.  */
703 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
707 /* Encode the extended register operand for e.g.
708 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
710 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
711 const aarch64_opnd_info *info, aarch64_insn *code,
712 const aarch64_inst *inst ATTRIBUTE_UNUSED)
714 enum aarch64_modifier_kind kind;
/* Rm: the register being extended.  */
717 insert_field (FLD_Rm, code, info->reg.regno, 0);
/* option field: bare LSL maps to UXTW/UXTX depending on register width.  */
719 kind = info->shifter.kind;
720 if (kind == AARCH64_MOD_LSL)
721 kind = info->qualifier == AARCH64_OPND_QLF_W
722 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
723 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
/* imm3: the left-shift amount (0-4).  */
725 insert_field (FLD_imm3, code, info->shifter.amount, 0);
730 /* Encode the shifted register operand for e.g.
731 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
733 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
734 const aarch64_opnd_info *info, aarch64_insn *code,
735 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Rm: the register being shifted.  */
738 insert_field (FLD_Rm, code, info->reg.regno, 0);
/* shift: LSL/LSR/ASR/ROR selector.  */
740 insert_field (FLD_shift, code,
741 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
/* imm6: the shift amount.  */
743 insert_field (FLD_imm6, code, info->shifter.amount, 0);
748 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
749 array specifies which field to use for Zn. MM is encoded in the
750 concatenation of imm5 and SVE_tszh, with imm5 being the less
753 aarch64_ins_sve_index (const aarch64_operand *self,
754 const aarch64_opnd_info *info, aarch64_insn *code,
755 const aarch64_inst *inst ATTRIBUTE_UNUSED)
757 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
758 insert_field (self->fields[0], code, info->reglane.regno, 0);
/* Triangular encoding: (index * 2 + 1) * esize packs both the element size
   (position of the lowest set bit) and the index into imm5:SVE_tszh.  */
759 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
760 2, FLD_imm5, FLD_SVE_tszh);
764 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
767 aarch64_ins_sve_reglist (const aarch64_operand *self,
768 const aarch64_opnd_info *info, aarch64_insn *code,
769 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Only the first register is encoded; the list length is implied.  */
771 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
775 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
776 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
779 aarch64_ins_sve_scale (const aarch64_operand *self,
780 const aarch64_opnd_info *info, aarch64_insn *code,
781 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* <pattern> goes into SELF's fields; MUL #<amount> - 1 into SVE_imm4.  */
783 insert_all_fields (self, code, info->imm.value);
784 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
788 /* Miscellaneous encoding functions. */
790 /* Encode size[0], i.e. bit 22, for
791 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
794 encode_asimd_fcvt (aarch64_inst *inst)
797 aarch64_field field = {0, 0};
798 enum aarch64_opnd_qualifier qualifier;
/* Pick the operand that carries the wide (.4S/.2D) arrangement.  */
800 switch (inst->opcode->op)
804 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
805 qualifier = inst->operands[1].qualifier;
809 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
810 qualifier = inst->operands[0].qualifier;
/* size[0] is 0 for the 4S form, 1 for the 2D form.  */
815 assert (qualifier == AARCH64_OPND_QLF_V_4S
816 || qualifier == AARCH64_OPND_QLF_V_2D);
817 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
818 gen_sub_field (FLD_size, 0, 1, &field);
819 insert_field_2 (&field, &inst->value, value, 0);
822 /* Encode size[0], i.e. bit 22, for
823 e.g. FCVTXN <Vb><d>, <Va><n>. */
826 encode_asisd_fcvtxn (aarch64_inst *inst)
/* FCVTXN's destination is always S (from D), so size[0] is fixed at 1.  */
828 aarch64_insn val = 1;
829 aarch64_field field = {0, 0};
830 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
831 gen_sub_field (FLD_size, 0, 1, &field);
832 insert_field_2 (&field, &inst->value, val, 0);
835 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
837 encode_fcvt (aarch64_inst *inst)
/* opc occupies bits 15-16 (width 2).  */
840 const aarch64_field field = {15, 2};
/* opc selects the destination precision: 0 = S, 1 = D, 3 = H.  */
843 switch (inst->operands[0].qualifier)
845 case AARCH64_OPND_QLF_S_S: val = 0; break;
846 case AARCH64_OPND_QLF_S_D: val = 1; break;
847 case AARCH64_OPND_QLF_S_H: val = 3; break;
850 insert_field_2 (&field, &inst->value, val, 0);
855 /* Do miscellaneous encodings that are not common enough to be driven by
/* Dispatch on the opcode's op value to the ad-hoc encoders above
   (case labels largely elided in this listing).  */
859 do_misc_encoding (aarch64_inst *inst)
861 switch (inst->opcode->op)
870 encode_asimd_fcvt (inst);
873 encode_asisd_fcvtxn (inst);
879 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
881 encode_sizeq (aarch64_inst *inst)
884 enum aarch64_field_kind kind;
887 /* Get the index of the operand whose information we are going to use
888 to encode the size and Q fields.
889 This is deduced from the possible valid qualifier lists. */
890 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
891 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
892 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
/* SIZEQ packs size in bits 2:1 and Q in bit 0.  */
893 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
895 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
/* AdvSIMD load/store classes use vldst_size for the size bits.  */
897 if (inst->opcode->iclass == asisdlse
898 || inst->opcode->iclass == asisdlsep
899 || inst->opcode->iclass == asisdlso
900 || inst->opcode->iclass == asisdlsop)
901 kind = FLD_vldst_size;
904 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
907 /* Opcodes that have fields shared by multiple operands are usually flagged
908 with flags. In this function, we detect such flags and use the
909 information in one of the related operands to do the encoding. The 'one'
910 operand is not any operand but one of the operands that has the enough
911 information for such an encoding. */
914 do_special_encoding (struct aarch64_inst *inst)
917 aarch64_insn value = 0;
919 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
921 /* Condition for truly conditional executed instructions, e.g. b.cond. */
922 if (inst->opcode->flags & F_COND)
924 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
/* F_SF: 32/64-bit sf bit taken from a designated operand's qualifier.  */
926 if (inst->opcode->flags & F_SF)
928 idx = select_operand_for_sf_field_coding (inst->opcode);
929 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
930 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
932 insert_field (FLD_sf, &inst->value, value, 0);
/* F_N: the N bit mirrors sf for logical (immediate) instructions.  */
933 if (inst->opcode->flags & F_N)
934 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
/* F_LSE_SZ: size bit for LSE atomics, also derived from X/SP vs W.  */
936 if (inst->opcode->flags & F_LSE_SZ)
938 idx = select_operand_for_sf_field_coding (inst->opcode);
939 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
940 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
942 insert_field (FLD_lse_sz, &inst->value, value, 0);
944 if (inst->opcode->flags & F_SIZEQ)
/* F_FPTYPE: scalar FP type field (0 = S, 1 = D, 3 = H).  */
946 if (inst->opcode->flags & F_FPTYPE)
948 idx = select_operand_for_fptype_field_coding (inst->opcode);
949 switch (inst->operands[idx].qualifier)
951 case AARCH64_OPND_QLF_S_S: value = 0; break;
952 case AARCH64_OPND_QLF_S_D: value = 1; break;
953 case AARCH64_OPND_QLF_S_H: value = 3; break;
956 insert_field (FLD_type, &inst->value, value, 0);
/* F_SSIZE: scalar AdvSIMD element size from a B..Q qualifier.  */
958 if (inst->opcode->flags & F_SSIZE)
960 enum aarch64_opnd_qualifier qualifier;
961 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
962 qualifier = inst->operands[idx].qualifier;
963 assert (qualifier >= AARCH64_OPND_QLF_S_B
964 && qualifier <= AARCH64_OPND_QLF_S_Q);
965 value = aarch64_get_qualifier_standard_value (qualifier);
966 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
/* F_T: vector arrangement encoded in Q and a one-hot bit inside imm5.  */
968 if (inst->opcode->flags & F_T)
970 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
971 aarch64_field field = {0, 0};
972 enum aarch64_opnd_qualifier qualifier;
975 qualifier = inst->operands[idx].qualifier;
976 assert (aarch64_get_operand_class (inst->opcode->operands[0])
977 == AARCH64_OPND_CLASS_SIMD_REG
978 && qualifier >= AARCH64_OPND_QLF_V_8B
979 && qualifier <= AARCH64_OPND_QLF_V_2D);
990 value = aarch64_get_qualifier_standard_value (qualifier);
991 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
992 num = (int) value >> 1;
993 assert (num >= 0 && num <= 3);
994 gen_sub_field (FLD_imm5, 0, num + 1, &field);
995 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
997 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
999 /* Use Rt to encode in the case of e.g.
1000 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1001 enum aarch64_opnd_qualifier qualifier;
1002 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1004 /* Otherwise use the result operand, which has to be an integer
1007 assert (idx == 0 || idx == 1);
1008 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1009 == AARCH64_OPND_CLASS_INT_REG);
1010 qualifier = inst->operands[idx].qualifier;
1011 insert_field (FLD_Q, &inst->value,
1012 aarch64_get_qualifier_standard_value (qualifier), 0);
1014 if (inst->opcode->flags & F_LDS_SIZE)
1016 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1017 enum aarch64_opnd_qualifier qualifier;
1018 aarch64_field field = {0, 0};
1019 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1020 == AARCH64_OPND_CLASS_INT_REG)
1021 gen_sub_field (FLD_opc, 0, 1, &field);
1022 qualifier = inst->operands[0].qualifier;
/* opc<0> is the inverse of the standard qualifier value (0 for X, 1 for W).  */
1023 insert_field_2 (&field, &inst->value,
1024 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1026 /* Miscellaneous encoding as the last step. */
1027 if (inst->opcode->flags & F_MISC)
1028 do_misc_encoding (inst);
1030 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1033 /* Converters converting an alias opcode instruction to its real form. */
1035 /* ROR <Wd>, <Ws>, #<shift>
1037 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1039 convert_ror_to_extr (aarch64_inst *inst)
/* Duplicate the source register: ROR Wd, Ws, #n -> EXTR Wd, Ws, Ws, #n.  */
1041 copy_operand_info (inst, 3, 2);
1042 copy_operand_info (inst, 2, 1);
1045 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1047 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1049 convert_xtl_to_shll (aarch64_inst *inst)
/* UXTL/SXTL are the shift-by-zero forms of USHLL/SSHLL: materialize the
   implicit #0 immediate operand.  */
1051 inst->operands[2].qualifier = inst->operands[1].qualifier;
1052 inst->operands[2].imm.value = 0;
1056 LSR <Xd>, <Xn>, #<shift>
1058 UBFM <Xd>, <Xn>, #<shift>, #63. */
1060 convert_sr_to_bfm (aarch64_inst *inst)
/* ASR/LSR Xd, Xn, #shift -> xBFM Xd, Xn, #shift, #31|63: the imms operand
   is the register width minus one.  */
1062 inst->operands[3].imm.value =
1063 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1066 /* Convert MOV to ORR. */
1068 convert_mov_to_orr (aarch64_inst *inst)
1070 /* MOV <Vd>.<T>, <Vn>.<T>
1072 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T> -- duplicate the source register. */
1073 copy_operand_info (inst, 2, 1);
1076 /* When <imms> >= <immr>, the instruction written:
1077 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1079 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1082 convert_bfx_to_bfm (aarch64_inst *inst)
1086 /* Convert the operand. */
1087 lsb = inst->operands[2].imm.value;
1088 width = inst->operands[3].imm.value;
/* immr = lsb (self-assignment kept for symmetry with the other converters);
   imms = lsb + width - 1.  */
1089 inst->operands[2].imm.value = lsb;
1090 inst->operands[3].imm.value = lsb + width - 1;
1093 /* When <imms> < <immr>, the instruction written:
1094 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1096 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1099 convert_bfi_to_bfm (aarch64_inst *inst)
1103 /* Convert the operand. */
1104 lsb = inst->operands[2].imm.value;
1105 width = inst->operands[3].imm.value;
/* 32-bit form: immr = (32 - lsb) mod 32, imms = width - 1.  */
1106 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1108 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1109 inst->operands[3].imm.value = width - 1;
/* 64-bit form: immr = (64 - lsb) mod 64, imms = width - 1.  */
1113 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1114 inst->operands[3].imm.value = width - 1;
1118 /* The instruction written:
1119 BFC <Xd>, #<lsb>, #<width>
1121 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1124 convert_bfc_to_bfm (aarch64_inst *inst)
/* Insert the implicit XZR/WZR source: shift operands up and make
   operand 1 register 31.  */
1129 copy_operand_info (inst, 3, 2);
1130 copy_operand_info (inst, 2, 1);
1131 copy_operand_info (inst, 2, 0);
1132 inst->operands[1].reg.regno = 0x1f;
1134 /* Convert the immediate operand. */
1135 lsb = inst->operands[2].imm.value;
1136 width = inst->operands[3].imm.value;
/* Same immr/imms mapping as BFI (see convert_bfi_to_bfm).  */
1137 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1139 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1140 inst->operands[3].imm.value = width - 1;
1144 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1145 inst->operands[3].imm.value = width - 1;
1149 /* The instruction written:
1150 LSL <Xd>, <Xn>, #<shift>
1152 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1155 convert_lsl_to_ubfm (aarch64_inst *inst)
1157 int64_t shift = inst->operands[2].imm.value;
/* 32-bit: UBFM Wd, Wn, #((32 - shift) & 31), #(31 - shift).  */
1159 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1161 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1162 inst->operands[3].imm.value = 31 - shift;
/* 64-bit: UBFM Xd, Xn, #((64 - shift) & 63), #(63 - shift).  */
1166 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1167 inst->operands[3].imm.value = 63 - shift;
1171 /* CINC <Wd>, <Wn>, <cond>
1173 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1176 convert_to_csel (aarch64_inst *inst)
/* Duplicate the source register and invert the condition:
   CINC Wd, Wn, cond -> CSINC Wd, Wn, Wn, invert(cond).  */
1178 copy_operand_info (inst, 3, 2);
1179 copy_operand_info (inst, 2, 1);
1180 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1183 /* CSET <Wd>, <cond>
1185 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1188 convert_cset_to_csinc (aarch64_inst *inst)
/* Shift the condition into operand 3, then fill operands 1 and 2 with
   the zero register: CSET Wd, cond -> CSINC Wd, WZR, WZR, invert(cond).  */
1190 copy_operand_info (inst, 3, 1);
1191 copy_operand_info (inst, 2, 0);
1192 copy_operand_info (inst, 1, 0);
1193 inst->operands[1].reg.regno = 0x1f;
1194 inst->operands[2].reg.regno = 0x1f;
1195 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1200 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1203 convert_mov_to_movewide (aarch64_inst *inst)
1206 uint32_t shift_amount;
/* MOVN encodes the bitwise inverse of the requested value.  */
1209 switch (inst->opcode->op)
1211 case OP_MOV_IMM_WIDE:
1212 value = inst->operands[1].imm.value;
1214 case OP_MOV_IMM_WIDEN:
1215 value = ~inst->operands[1].imm.value;
/* Split the value into a 16-bit immediate plus an LSL shift.  */
1220 inst->operands[1].type = AARCH64_OPND_HALF;
1221 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1222 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1223 /* The constraint check should have guaranteed this wouldn't happen. */
1225 value >>= shift_amount;
1227 inst->operands[1].imm.value = value;
1228 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1229 inst->operands[1].shifter.amount = shift_amount;
1234 ORR <Wd>, WZR, #<imm>. */
1237 convert_mov_to_movebitmask (aarch64_inst *inst)
1239 copy_operand_info (inst, 2, 1);
1240 inst->operands[1].reg.regno = 0x1f;
1241 inst->operands[1].skip = 0;
1244 /* Some alias opcodes are assembled by being converted to their real-form. */
1247 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1249 const aarch64_opcode *alias = inst->opcode;
1251 if ((alias->flags & F_CONV) == 0)
1252 goto convert_to_real_return;
1258 convert_sr_to_bfm (inst);
1261 convert_lsl_to_ubfm (inst);
1266 convert_to_csel (inst);
1270 convert_cset_to_csinc (inst);
1275 convert_bfx_to_bfm (inst);
1280 convert_bfi_to_bfm (inst);
1283 convert_bfc_to_bfm (inst);
1286 convert_mov_to_orr (inst);
1288 case OP_MOV_IMM_WIDE:
1289 case OP_MOV_IMM_WIDEN:
1290 convert_mov_to_movewide (inst);
1292 case OP_MOV_IMM_LOG:
1293 convert_mov_to_movebitmask (inst);
1296 convert_ror_to_extr (inst);
1302 convert_xtl_to_shll (inst);
1308 convert_to_real_return:
1309 aarch64_replace_opcode (inst, real);
1312 /* Encode *INST_ORI of the opcode code OPCODE.
1313 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1314 matched operand qualifier sequence in *QLF_SEQ. */
1317 aarch64_opcode_encode (const aarch64_opcode *opcode,
1318 const aarch64_inst *inst_ori, aarch64_insn *code,
1319 aarch64_opnd_qualifier_t *qlf_seq,
1320 aarch64_operand_error *mismatch_detail)
1323 const aarch64_opcode *aliased;
1324 aarch64_inst copy, *inst;
1326 DEBUG_TRACE ("enter with %s", opcode->name);
1328 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1332 assert (inst->opcode == NULL || inst->opcode == opcode);
1333 if (inst->opcode == NULL)
1334 inst->opcode = opcode;
1336 /* Constrain the operands.
1337 After passing this, the encoding is guaranteed to succeed. */
1338 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1340 DEBUG_TRACE ("FAIL since operand constraint not met");
1344 /* Get the base value.
1345 Note: this has to be before the aliasing handling below in order to
1346 get the base value from the alias opcode before we move on to the
1347 aliased opcode for encoding. */
1348 inst->value = opcode->opcode;
1350 /* No need to do anything else if the opcode does not have any operand. */
1351 if (aarch64_num_of_operands (opcode) == 0)
1354 /* Assign operand indexes and check types. Also put the matched
1355 operand qualifiers in *QLF_SEQ to return. */
1356 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1358 assert (opcode->operands[i] == inst->operands[i].type);
1359 inst->operands[i].idx = i;
1360 if (qlf_seq != NULL)
1361 *qlf_seq = inst->operands[i].qualifier;
1364 aliased = aarch64_find_real_opcode (opcode);
1365 /* If the opcode is an alias and it does not ask for direct encoding by
1366 itself, the instruction will be transformed to the form of real opcode
1367 and the encoding will be carried out using the rules for the aliased
1369 if (aliased != NULL && (opcode->flags & F_CONV))
1371 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1372 aliased->name, opcode->name);
1373 /* Convert the operands to the form of the real opcode. */
1374 convert_to_real (inst, aliased);
1378 aarch64_opnd_info *info = inst->operands;
1380 /* Call the inserter of each operand. */
1381 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1383 const aarch64_operand *opnd;
1384 enum aarch64_opnd type = opcode->operands[i];
1385 if (type == AARCH64_OPND_NIL)
1389 DEBUG_TRACE ("skip the incomplete operand %d", i);
1392 opnd = &aarch64_operands[type];
1393 if (operand_has_inserter (opnd))
1394 aarch64_insert_operand (opnd, info, &inst->value, inst);
1397 /* Call opcode encoders indicated by flags. */
1398 if (opcode_has_special_coder (opcode))
1399 do_special_encoding (inst);
1402 DEBUG_TRACE ("exit with %s", opcode->name);
1404 *code = inst->value;