1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order that the least significant
33 field for VALUE comes first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
47 num = va_arg (va, uint32_t);
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
67 enum aarch64_field_kind kind;
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
78 /* Operand inserters. */
80 /* Insert register number. */
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
86 insert_field (self->fields[0], code, info->reg.regno, 0);
90 /* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
94 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
128 switch (info->qualifier)
130 case AARCH64_OPND_QLF_S_H:
132 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
134 case AARCH64_OPND_QLF_S_S:
136 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
138 case AARCH64_OPND_QLF_S_D:
140 insert_field (FLD_H, code, info->reglane.index, 0);
149 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
151 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
162 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
165 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
166 const aarch64_opnd_info *info, aarch64_insn *code,
167 const aarch64_inst *inst)
169 aarch64_insn value = 0;
170 /* Number of elements in each structure to be loaded/stored. */
171 unsigned num = get_opcode_dependent_value (inst->opcode);
174 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
179 switch (info->reglist.num_regs)
181 case 1: value = 0x7; break;
182 case 2: value = 0xa; break;
183 case 3: value = 0x6; break;
184 case 4: value = 0x2; break;
189 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
200 insert_field (FLD_opcode, code, value, 0);
205 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
208 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have alternating variant, but have "two consecutive"
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
230 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
233 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
234 const aarch64_opnd_info *info, aarch64_insn *code,
235 const aarch64_inst *inst ATTRIBUTE_UNUSED)
237 aarch64_field field = {0, 0};
238 aarch64_insn QSsize = 0; /* fields Q:S:size. */
239 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
241 assert (info->reglist.has_index);
244 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
245 /* Encode the index, opcode<2:1> and size. */
246 switch (info->qualifier)
248 case AARCH64_OPND_QLF_S_B:
249 /* Index encoded in "Q:S:size". */
250 QSsize = info->reglist.index;
253 case AARCH64_OPND_QLF_S_H:
254 /* Index encoded in "Q:S:size<1>". */
255 QSsize = info->reglist.index << 1;
258 case AARCH64_OPND_QLF_S_S:
259 /* Index encoded in "Q:S". */
260 QSsize = info->reglist.index << 2;
263 case AARCH64_OPND_QLF_S_D:
264 /* Index encoded in "Q". */
265 QSsize = info->reglist.index << 3 | 0x1;
271 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
272 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
273 insert_field_2 (&field, code, opcodeh2, 0);
278 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
282 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
283 const aarch64_opnd_info *info,
284 aarch64_insn *code, const aarch64_inst *inst)
286 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
289 if (inst->opcode->iclass == asimdshf)
293 0000 x SEE AdvSIMD modified immediate
302 Q = (val & 0x1) ? 1 : 0;
303 insert_field (FLD_Q, code, Q, inst->opcode->mask);
307 assert (info->type == AARCH64_OPND_IMM_VLSR
308 || info->type == AARCH64_OPND_IMM_VLSL);
310 if (info->type == AARCH64_OPND_IMM_VLSR)
313 0000 SEE AdvSIMD modified immediate
314 0001 (16-UInt(immh:immb))
315 001x (32-UInt(immh:immb))
316 01xx (64-UInt(immh:immb))
317 1xxx (128-UInt(immh:immb)) */
318 imm = (16 << (unsigned)val) - info->imm.value;
322 0000 SEE AdvSIMD modified immediate
323 0001 (UInt(immh:immb)-8)
324 001x (UInt(immh:immb)-16)
325 01xx (UInt(immh:immb)-32)
326 1xxx (UInt(immh:immb)-64) */
327 imm = info->imm.value + (8 << (unsigned)val);
328 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
333 /* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
336 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
342 imm = info->imm.value;
343 if (operand_need_shift_by_two (self))
345 insert_all_fields (self, code, imm);
349 /* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
352 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
353 aarch64_insn *code, const aarch64_inst *inst)
356 aarch64_ins_imm (self, info, code, inst);
358 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
362 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
365 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
366 const aarch64_opnd_info *info,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
370 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
371 uint64_t imm = info->imm.value;
372 enum aarch64_modifier_kind kind = info->shifter.kind;
373 int amount = info->shifter.amount;
374 aarch64_field field = {0, 0};
376 /* a:b:c:d:e:f:g:h */
377 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
379 /* Either MOVI <Dd>, #<imm>
380 or MOVI <Vd>.2D, #<imm>.
381 <imm> is a 64-bit immediate
382 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
383 encoded in "a:b:c:d:e:f:g:h". */
384 imm = aarch64_shrink_expanded_imm8 (imm);
385 assert ((int)imm >= 0);
387 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
389 if (kind == AARCH64_MOD_NONE)
392 /* shift amount partially in cmode */
393 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
394 if (kind == AARCH64_MOD_LSL)
396 /* AARCH64_MOD_LSL: shift zeros. */
397 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
398 assert (esize == 4 || esize == 2 || esize == 1);
399 /* For 8-bit move immediate, the optional LSL #0 does not require
405 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
407 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
411 /* AARCH64_MOD_MSL: shift ones. */
413 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
415 insert_field_2 (&field, code, amount, 0);
420 /* Insert fields for an 8-bit floating-point immediate. */
422 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED)
426 insert_all_fields (self, code, info->imm.value);
430 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
433 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
437 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
441 /* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
444 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
455 /* Insert logical/bitmask immediate for e.g. the last operand in
456 ORR <Wd|WSP>, <Wn>, #<imm>. */
458 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
459 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
462 uint64_t imm = info->imm.value;
463 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
465 if (inst->opcode->op == OP_BIC)
467 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
468 /* The constraint check should have guaranteed this wouldn't happen. */
471 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
476 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
477 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
479 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
480 aarch64_insn *code, const aarch64_inst *inst)
482 aarch64_insn value = 0;
484 assert (info->idx == 0);
487 aarch64_ins_regno (self, info, code, inst);
488 if (inst->opcode->iclass == ldstpair_indexed
489 || inst->opcode->iclass == ldstnapair_offs
490 || inst->opcode->iclass == ldstpair_off
491 || inst->opcode->iclass == loadlit)
494 switch (info->qualifier)
496 case AARCH64_OPND_QLF_S_S: value = 0; break;
497 case AARCH64_OPND_QLF_S_D: value = 1; break;
498 case AARCH64_OPND_QLF_S_Q: value = 2; break;
501 insert_field (FLD_ldst_size, code, value, 0);
506 value = aarch64_get_qualifier_standard_value (info->qualifier);
507 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
513 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
515 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
516 const aarch64_opnd_info *info, aarch64_insn *code,
517 const aarch64_inst *inst ATTRIBUTE_UNUSED)
520 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
524 /* Encode the address operand for e.g.
525 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
527 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
528 const aarch64_opnd_info *info, aarch64_insn *code,
529 const aarch64_inst *inst ATTRIBUTE_UNUSED)
532 enum aarch64_modifier_kind kind = info->shifter.kind;
535 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
537 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
539 if (kind == AARCH64_MOD_LSL)
540 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
541 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
543 if (info->qualifier != AARCH64_OPND_QLF_S_B)
544 S = info->shifter.amount != 0;
546 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
550 Must be #0 if <extend> is explicitly LSL. */
551 S = info->shifter.operator_present && info->shifter.amount_present;
552 insert_field (FLD_S, code, S, 0);
557 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
559 aarch64_ins_addr_simm (const aarch64_operand *self,
560 const aarch64_opnd_info *info,
562 const aarch64_inst *inst ATTRIBUTE_UNUSED)
567 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
568 /* simm (imm9 or imm7) */
569 imm = info->addr.offset.imm;
570 if (self->fields[0] == FLD_imm7)
571 /* scaled immediate in ld/st pair instructions.. */
572 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
573 insert_field (self->fields[0], code, imm, 0);
574 /* pre/post- index */
575 if (info->addr.writeback)
577 assert (inst->opcode->iclass != ldst_unscaled
578 && inst->opcode->iclass != ldstnapair_offs
579 && inst->opcode->iclass != ldstpair_off
580 && inst->opcode->iclass != ldst_unpriv);
581 assert (info->addr.preind != info->addr.postind);
582 if (info->addr.preind)
583 insert_field (self->fields[1], code, 1, 0);
589 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
591 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
592 const aarch64_opnd_info *info,
594 const aarch64_inst *inst ATTRIBUTE_UNUSED)
596 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
599 insert_field (self->fields[0], code, info->addr.base_regno, 0);
601 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
605 /* Encode the address operand for e.g.
606 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
608 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
613 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
615 if (info->addr.offset.is_reg)
616 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
618 insert_field (FLD_Rm, code, 0x1f, 0);
622 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
624 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
629 insert_field (FLD_cond, code, info->cond->value, 0);
633 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
635 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
636 const aarch64_opnd_info *info, aarch64_insn *code,
637 const aarch64_inst *inst ATTRIBUTE_UNUSED)
639 /* op0:op1:CRn:CRm:op2 */
640 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
641 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
645 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
647 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
648 const aarch64_opnd_info *info, aarch64_insn *code,
649 const aarch64_inst *inst ATTRIBUTE_UNUSED)
652 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
657 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
659 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
660 const aarch64_opnd_info *info, aarch64_insn *code,
661 const aarch64_inst *inst ATTRIBUTE_UNUSED)
663 /* op1:CRn:CRm:op2 */
664 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
665 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
669 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
672 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
673 const aarch64_opnd_info *info, aarch64_insn *code,
674 const aarch64_inst *inst ATTRIBUTE_UNUSED)
677 insert_field (FLD_CRm, code, info->barrier->value, 0);
681 /* Encode the prefetch operation option operand for e.g.
682 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
685 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
686 const aarch64_opnd_info *info, aarch64_insn *code,
687 const aarch64_inst *inst ATTRIBUTE_UNUSED)
690 insert_field (FLD_Rt, code, info->prfop->value, 0);
694 /* Encode the hint number for instructions that alias HINT but take an
698 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
699 const aarch64_opnd_info *info, aarch64_insn *code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED)
703 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
707 /* Encode the extended register operand for e.g.
708 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
710 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
711 const aarch64_opnd_info *info, aarch64_insn *code,
712 const aarch64_inst *inst ATTRIBUTE_UNUSED)
714 enum aarch64_modifier_kind kind;
717 insert_field (FLD_Rm, code, info->reg.regno, 0);
719 kind = info->shifter.kind;
720 if (kind == AARCH64_MOD_LSL)
721 kind = info->qualifier == AARCH64_OPND_QLF_W
722 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
723 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
725 insert_field (FLD_imm3, code, info->shifter.amount, 0);
730 /* Encode the shifted register operand for e.g.
731 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
733 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
734 const aarch64_opnd_info *info, aarch64_insn *code,
735 const aarch64_inst *inst ATTRIBUTE_UNUSED)
738 insert_field (FLD_Rm, code, info->reg.regno, 0);
740 insert_field (FLD_shift, code,
741 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
743 insert_field (FLD_imm6, code, info->shifter.amount, 0);
748 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
749 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
750 SELF's operand-dependent value. fields[0] specifies the field that
751 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
753 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
754 const aarch64_opnd_info *info,
756 const aarch64_inst *inst ATTRIBUTE_UNUSED)
758 int factor = 1 + get_operand_specific_data (self);
759 insert_field (self->fields[0], code, info->addr.base_regno, 0);
760 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
764 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
765 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
766 SELF's operand-dependent value. fields[0] specifies the field that
767 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
769 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
770 const aarch64_opnd_info *info,
772 const aarch64_inst *inst ATTRIBUTE_UNUSED)
774 int factor = 1 + get_operand_specific_data (self);
775 insert_field (self->fields[0], code, info->addr.base_regno, 0);
776 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
780 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
781 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
782 SELF's operand-dependent value. fields[0] specifies the field that
783 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
784 and imm3 fields, with imm3 being the less-significant part. */
786 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
787 const aarch64_opnd_info *info,
789 const aarch64_inst *inst ATTRIBUTE_UNUSED)
791 int factor = 1 + get_operand_specific_data (self);
792 insert_field (self->fields[0], code, info->addr.base_regno, 0);
793 insert_fields (code, info->addr.offset.imm / factor, 0,
794 2, FLD_imm3, FLD_SVE_imm6);
798 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
799 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
800 value. fields[0] specifies the base register field. */
802 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
803 const aarch64_opnd_info *info, aarch64_insn *code,
804 const aarch64_inst *inst ATTRIBUTE_UNUSED)
806 int factor = 1 << get_operand_specific_data (self);
807 insert_field (self->fields[0], code, info->addr.base_regno, 0);
808 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
812 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
813 is SELF's operand-dependent value. fields[0] specifies the base
814 register field and fields[1] specifies the offset register field. */
816 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
817 const aarch64_opnd_info *info, aarch64_insn *code,
818 const aarch64_inst *inst ATTRIBUTE_UNUSED)
820 insert_field (self->fields[0], code, info->addr.base_regno, 0);
821 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
825 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
826 <shift> is SELF's operand-dependent value. fields[0] specifies the
827 base register field, fields[1] specifies the offset register field and
828 fields[2] is a single-bit field that selects SXTW over UXTW. */
830 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
831 const aarch64_opnd_info *info, aarch64_insn *code,
832 const aarch64_inst *inst ATTRIBUTE_UNUSED)
834 insert_field (self->fields[0], code, info->addr.base_regno, 0);
835 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
836 if (info->shifter.kind == AARCH64_MOD_UXTW)
837 insert_field (self->fields[2], code, 0, 0);
839 insert_field (self->fields[2], code, 1, 0);
843 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
844 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
845 fields[0] specifies the base register field. */
847 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
848 const aarch64_opnd_info *info, aarch64_insn *code,
849 const aarch64_inst *inst ATTRIBUTE_UNUSED)
851 int factor = 1 << get_operand_specific_data (self);
852 insert_field (self->fields[0], code, info->addr.base_regno, 0);
853 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
857 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
858 where <modifier> is fixed by the instruction and where <msz> is a
859 2-bit unsigned number. fields[0] specifies the base register field
860 and fields[1] specifies the offset register field. */
862 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
863 const aarch64_opnd_info *info, aarch64_insn *code)
865 insert_field (self->fields[0], code, info->addr.base_regno, 0);
866 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
867 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
871 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
872 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
873 field and fields[1] specifies the offset register field. */
875 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
876 const aarch64_opnd_info *info, aarch64_insn *code,
877 const aarch64_inst *inst ATTRIBUTE_UNUSED)
879 return aarch64_ext_sve_addr_zz (self, info, code);
882 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
883 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
884 field and fields[1] specifies the offset register field. */
886 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
887 const aarch64_opnd_info *info,
889 const aarch64_inst *inst ATTRIBUTE_UNUSED)
891 return aarch64_ext_sve_addr_zz (self, info, code);
894 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
895 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
896 field and fields[1] specifies the offset register field. */
898 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
899 const aarch64_opnd_info *info,
901 const aarch64_inst *inst ATTRIBUTE_UNUSED)
903 return aarch64_ext_sve_addr_zz (self, info, code);
906 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
907 array specifies which field to use for Zn. MM is encoded in the
908 concatenation of imm5 and SVE_tszh, with imm5 being the less
911 aarch64_ins_sve_index (const aarch64_operand *self,
912 const aarch64_opnd_info *info, aarch64_insn *code,
913 const aarch64_inst *inst ATTRIBUTE_UNUSED)
915 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
916 insert_field (self->fields[0], code, info->reglane.regno, 0);
917 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
918 2, FLD_imm5, FLD_SVE_tszh);
922 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
925 aarch64_ins_sve_reglist (const aarch64_operand *self,
926 const aarch64_opnd_info *info, aarch64_insn *code,
927 const aarch64_inst *inst ATTRIBUTE_UNUSED)
929 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
933 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
934 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
937 aarch64_ins_sve_scale (const aarch64_operand *self,
938 const aarch64_opnd_info *info, aarch64_insn *code,
939 const aarch64_inst *inst ATTRIBUTE_UNUSED)
941 insert_all_fields (self, code, info->imm.value);
942 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
946 /* Miscellaneous encoding functions. */
948 /* Encode size[0], i.e. bit 22, for
949 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
952 encode_asimd_fcvt (aarch64_inst *inst)
955 aarch64_field field = {0, 0};
956 enum aarch64_opnd_qualifier qualifier;
958 switch (inst->opcode->op)
962 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
963 qualifier = inst->operands[1].qualifier;
967 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
968 qualifier = inst->operands[0].qualifier;
973 assert (qualifier == AARCH64_OPND_QLF_V_4S
974 || qualifier == AARCH64_OPND_QLF_V_2D);
975 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
976 gen_sub_field (FLD_size, 0, 1, &field);
977 insert_field_2 (&field, &inst->value, value, 0);
980 /* Encode size[0], i.e. bit 22, for
981 e.g. FCVTXN <Vb><d>, <Va><n>. */
984 encode_asisd_fcvtxn (aarch64_inst *inst)
986 aarch64_insn val = 1;
987 aarch64_field field = {0, 0};
988 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
989 gen_sub_field (FLD_size, 0, 1, &field);
990 insert_field_2 (&field, &inst->value, val, 0);
993 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
995 encode_fcvt (aarch64_inst *inst)
998 const aarch64_field field = {15, 2};
1001 switch (inst->operands[0].qualifier)
1003 case AARCH64_OPND_QLF_S_S: val = 0; break;
1004 case AARCH64_OPND_QLF_S_D: val = 1; break;
1005 case AARCH64_OPND_QLF_S_H: val = 3; break;
1008 insert_field_2 (&field, &inst->value, val, 0);
1013 /* Do miscellaneous encodings that are not common enough to be driven by
1017 do_misc_encoding (aarch64_inst *inst)
1019 switch (inst->opcode->op)
1028 encode_asimd_fcvt (inst);
1031 encode_asisd_fcvtxn (inst);
1037 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1039 encode_sizeq (aarch64_inst *inst)
1042 enum aarch64_field_kind kind;
1045 /* Get the index of the operand whose information we are going to use
1046 to encode the size and Q fields.
1047 This is deduced from the possible valid qualifier lists. */
1048 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1049 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1050 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1051 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1053 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1055 if (inst->opcode->iclass == asisdlse
1056 || inst->opcode->iclass == asisdlsep
1057 || inst->opcode->iclass == asisdlso
1058 || inst->opcode->iclass == asisdlsop)
1059 kind = FLD_vldst_size;
1062 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1065 /* Opcodes that have fields shared by multiple operands are usually flagged
1066 with flags. In this function, we detect such flags and use the
1067 information in one of the related operands to do the encoding. The 'one'
1068 operand is not any operand but one of the operands that has the enough
1069 information for such an encoding. */
1072 do_special_encoding (struct aarch64_inst *inst)
1075 aarch64_insn value = 0;
1077 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1079 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1080 if (inst->opcode->flags & F_COND)
1082 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1084 if (inst->opcode->flags & F_SF)
1086 idx = select_operand_for_sf_field_coding (inst->opcode);
1087 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1088 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1090 insert_field (FLD_sf, &inst->value, value, 0);
1091 if (inst->opcode->flags & F_N)
1092 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1094 if (inst->opcode->flags & F_LSE_SZ)
1096 idx = select_operand_for_sf_field_coding (inst->opcode);
1097 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1098 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1100 insert_field (FLD_lse_sz, &inst->value, value, 0);
1102 if (inst->opcode->flags & F_SIZEQ)
1103 encode_sizeq (inst);
1104 if (inst->opcode->flags & F_FPTYPE)
1106 idx = select_operand_for_fptype_field_coding (inst->opcode);
1107 switch (inst->operands[idx].qualifier)
1109 case AARCH64_OPND_QLF_S_S: value = 0; break;
1110 case AARCH64_OPND_QLF_S_D: value = 1; break;
1111 case AARCH64_OPND_QLF_S_H: value = 3; break;
1112 default: assert (0);
1114 insert_field (FLD_type, &inst->value, value, 0);
1116 if (inst->opcode->flags & F_SSIZE)
1118 enum aarch64_opnd_qualifier qualifier;
1119 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1120 qualifier = inst->operands[idx].qualifier;
1121 assert (qualifier >= AARCH64_OPND_QLF_S_B
1122 && qualifier <= AARCH64_OPND_QLF_S_Q);
1123 value = aarch64_get_qualifier_standard_value (qualifier);
1124 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1126 if (inst->opcode->flags & F_T)
1128 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1129 aarch64_field field = {0, 0};
1130 enum aarch64_opnd_qualifier qualifier;
1133 qualifier = inst->operands[idx].qualifier;
1134 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1135 == AARCH64_OPND_CLASS_SIMD_REG
1136 && qualifier >= AARCH64_OPND_QLF_V_8B
1137 && qualifier <= AARCH64_OPND_QLF_V_2D);
1148 value = aarch64_get_qualifier_standard_value (qualifier);
1149 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1150 num = (int) value >> 1;
1151 assert (num >= 0 && num <= 3);
1152 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1153 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
1155 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1157 /* Use Rt to encode in the case of e.g.
1158 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1159 enum aarch64_opnd_qualifier qualifier;
1160 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1162 /* Otherwise use the result operand, which has to be a integer
1165 assert (idx == 0 || idx == 1);
1166 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1167 == AARCH64_OPND_CLASS_INT_REG);
1168 qualifier = inst->operands[idx].qualifier;
1169 insert_field (FLD_Q, &inst->value,
1170 aarch64_get_qualifier_standard_value (qualifier), 0);
1172 if (inst->opcode->flags & F_LDS_SIZE)
1174 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1175 enum aarch64_opnd_qualifier qualifier;
1176 aarch64_field field = {0, 0};
1177 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1178 == AARCH64_OPND_CLASS_INT_REG);
1179 gen_sub_field (FLD_opc, 0, 1, &field);
1180 qualifier = inst->operands[0].qualifier;
1181 insert_field_2 (&field, &inst->value,
1182 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1184 /* Miscellaneous encoding as the last step. */
1185 if (inst->opcode->flags & F_MISC)
1186 do_misc_encoding (inst);
1188 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1191 /* Converters converting an alias opcode instruction to its real form. */
1193 /* ROR <Wd>, <Ws>, #<shift>
1195 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1197 convert_ror_to_extr (aarch64_inst *inst)
1199 copy_operand_info (inst, 3, 2);
1200 copy_operand_info (inst, 2, 1);
1203 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1205 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1207 convert_xtl_to_shll (aarch64_inst *inst)
1209 inst->operands[2].qualifier = inst->operands[1].qualifier;
1210 inst->operands[2].imm.value = 0;
1214 LSR <Xd>, <Xn>, #<shift>
1216 UBFM <Xd>, <Xn>, #<shift>, #63. */
1218 convert_sr_to_bfm (aarch64_inst *inst)
1220 inst->operands[3].imm.value =
1221 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1224 /* Convert MOV to ORR. */
1226 convert_mov_to_orr (aarch64_inst *inst)
1228 /* MOV <Vd>.<T>, <Vn>.<T>
1230 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1231 copy_operand_info (inst, 2, 1);
1234 /* When <imms> >= <immr>, the instruction written:
1235 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1237 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1240 convert_bfx_to_bfm (aarch64_inst *inst)
1244 /* Convert the operand. */
1245 lsb = inst->operands[2].imm.value;
1246 width = inst->operands[3].imm.value;
1247 inst->operands[2].imm.value = lsb;
1248 inst->operands[3].imm.value = lsb + width - 1;
1251 /* When <imms> < <immr>, the instruction written:
1252 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1254 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1257 convert_bfi_to_bfm (aarch64_inst *inst)
1261 /* Convert the operand. */
1262 lsb = inst->operands[2].imm.value;
1263 width = inst->operands[3].imm.value;
1264 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1266 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1267 inst->operands[3].imm.value = width - 1;
1271 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1272 inst->operands[3].imm.value = width - 1;
1276 /* The instruction written:
1277 BFC <Xd>, #<lsb>, #<width>
1279 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1282 convert_bfc_to_bfm (aarch64_inst *inst)
1287 copy_operand_info (inst, 3, 2);
1288 copy_operand_info (inst, 2, 1);
1289 copy_operand_info (inst, 2, 0);
1290 inst->operands[1].reg.regno = 0x1f;
1292 /* Convert the immedate operand. */
1293 lsb = inst->operands[2].imm.value;
1294 width = inst->operands[3].imm.value;
1295 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1297 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1298 inst->operands[3].imm.value = width - 1;
1302 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1303 inst->operands[3].imm.value = width - 1;
1307 /* The instruction written:
1308 LSL <Xd>, <Xn>, #<shift>
1310 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1313 convert_lsl_to_ubfm (aarch64_inst *inst)
1315 int64_t shift = inst->operands[2].imm.value;
1317 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1319 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1320 inst->operands[3].imm.value = 31 - shift;
1324 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1325 inst->operands[3].imm.value = 63 - shift;
1329 /* CINC <Wd>, <Wn>, <cond>
1331 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1334 convert_to_csel (aarch64_inst *inst)
1336 copy_operand_info (inst, 3, 2);
1337 copy_operand_info (inst, 2, 1);
1338 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1341 /* CSET <Wd>, <cond>
1343 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1346 convert_cset_to_csinc (aarch64_inst *inst)
1348 copy_operand_info (inst, 3, 1);
1349 copy_operand_info (inst, 2, 0);
1350 copy_operand_info (inst, 1, 0);
1351 inst->operands[1].reg.regno = 0x1f;
1352 inst->operands[2].reg.regno = 0x1f;
1353 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1358 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1361 convert_mov_to_movewide (aarch64_inst *inst)
1364 uint32_t shift_amount;
1367 switch (inst->opcode->op)
1369 case OP_MOV_IMM_WIDE:
1370 value = inst->operands[1].imm.value;
1372 case OP_MOV_IMM_WIDEN:
1373 value = ~inst->operands[1].imm.value;
1378 inst->operands[1].type = AARCH64_OPND_HALF;
1379 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1380 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1381 /* The constraint check should have guaranteed this wouldn't happen. */
1383 value >>= shift_amount;
1385 inst->operands[1].imm.value = value;
1386 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1387 inst->operands[1].shifter.amount = shift_amount;
1392 ORR <Wd>, WZR, #<imm>. */
1395 convert_mov_to_movebitmask (aarch64_inst *inst)
1397 copy_operand_info (inst, 2, 1);
1398 inst->operands[1].reg.regno = 0x1f;
1399 inst->operands[1].skip = 0;
1402 /* Some alias opcodes are assembled by being converted to their real-form. */
1405 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1407 const aarch64_opcode *alias = inst->opcode;
1409 if ((alias->flags & F_CONV) == 0)
1410 goto convert_to_real_return;
1416 convert_sr_to_bfm (inst);
1419 convert_lsl_to_ubfm (inst);
1424 convert_to_csel (inst);
1428 convert_cset_to_csinc (inst);
1433 convert_bfx_to_bfm (inst);
1438 convert_bfi_to_bfm (inst);
1441 convert_bfc_to_bfm (inst);
1444 convert_mov_to_orr (inst);
1446 case OP_MOV_IMM_WIDE:
1447 case OP_MOV_IMM_WIDEN:
1448 convert_mov_to_movewide (inst);
1450 case OP_MOV_IMM_LOG:
1451 convert_mov_to_movebitmask (inst);
1454 convert_ror_to_extr (inst);
1460 convert_xtl_to_shll (inst);
1466 convert_to_real_return:
1467 aarch64_replace_opcode (inst, real);
1470 /* Encode *INST_ORI of the opcode code OPCODE.
1471 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1472 matched operand qualifier sequence in *QLF_SEQ. */
1475 aarch64_opcode_encode (const aarch64_opcode *opcode,
1476 const aarch64_inst *inst_ori, aarch64_insn *code,
1477 aarch64_opnd_qualifier_t *qlf_seq,
1478 aarch64_operand_error *mismatch_detail)
1481 const aarch64_opcode *aliased;
1482 aarch64_inst copy, *inst;
1484 DEBUG_TRACE ("enter with %s", opcode->name);
1486 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1490 assert (inst->opcode == NULL || inst->opcode == opcode);
1491 if (inst->opcode == NULL)
1492 inst->opcode = opcode;
1494 /* Constrain the operands.
1495 After passing this, the encoding is guaranteed to succeed. */
1496 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1498 DEBUG_TRACE ("FAIL since operand constraint not met");
1502 /* Get the base value.
1503 Note: this has to be before the aliasing handling below in order to
1504 get the base value from the alias opcode before we move on to the
1505 aliased opcode for encoding. */
1506 inst->value = opcode->opcode;
1508 /* No need to do anything else if the opcode does not have any operand. */
1509 if (aarch64_num_of_operands (opcode) == 0)
1512 /* Assign operand indexes and check types. Also put the matched
1513 operand qualifiers in *QLF_SEQ to return. */
1514 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1516 assert (opcode->operands[i] == inst->operands[i].type);
1517 inst->operands[i].idx = i;
1518 if (qlf_seq != NULL)
1519 *qlf_seq = inst->operands[i].qualifier;
1522 aliased = aarch64_find_real_opcode (opcode);
1523 /* If the opcode is an alias and it does not ask for direct encoding by
1524 itself, the instruction will be transformed to the form of real opcode
1525 and the encoding will be carried out using the rules for the aliased
1527 if (aliased != NULL && (opcode->flags & F_CONV))
1529 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1530 aliased->name, opcode->name);
1531 /* Convert the operands to the form of the real opcode. */
1532 convert_to_real (inst, aliased);
1536 aarch64_opnd_info *info = inst->operands;
1538 /* Call the inserter of each operand. */
1539 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1541 const aarch64_operand *opnd;
1542 enum aarch64_opnd type = opcode->operands[i];
1543 if (type == AARCH64_OPND_NIL)
1547 DEBUG_TRACE ("skip the incomplete operand %d", i);
1550 opnd = &aarch64_operands[type];
1551 if (operand_has_inserter (opnd))
1552 aarch64_insert_operand (opnd, info, &inst->value, inst);
1555 /* Call opcode encoders indicated by flags. */
1556 if (opcode_has_special_coder (opcode))
1557 do_special_encoding (inst);
1560 DEBUG_TRACE ("exit with %s", opcode->name);
1562 *code = inst->value;