/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "sysdep.h"
#include <stdarg.h>

#include "libiberty.h"

#include "aarch64-asm.h"
/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases; in those cases the fields should be
   passed in the order of M, L, H.  */
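/* For example, for an S_H element the <index> is encoded in H:L:M, so
   passing the fields as M, L, H routes bit 0 of the index to M, bit 1 to L
   and bit 2 to H: index 5 (0b101) gives M = 1, L = 0 and H = 1.  */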
static void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */
static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}
/* Operand inserters.  */

/* Insert register number.  */
bfd_boolean
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return TRUE;
}
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);	/* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          insert_field (FLD_H, code, info->reglane.index, 0);
          break;
        default:
          assert (0);
        }
    }
  return TRUE;
}
/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
bfd_boolean
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return TRUE;
}
/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bfd_boolean
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: assert (0);
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);
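  /* For example, LD1 {V0.16B}, [X0] (one register) uses opcode 0x7, while
     LD1 {V0.16B, V1.16B}, [X0] (two registers) uses opcode 0xa, per the
     switch above.  */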
  return TRUE;
}

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
bfd_boolean
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst)
{
  aarch64_insn value;

  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a
       "two consecutive" form instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);
  return TRUE;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand, e.g. Vt in AdvSIMD load/store single element instructions.  */
bfd_boolean
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);
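  /* For example, LD1 {V0.S}[3], [X0] has qualifier S_S and index 3:
     QSsize = 3 << 2 = 0b1100, giving Q = 1, S = 1 and size = 00.  */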
  return TRUE;
}

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);
  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
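  /* For example, SSHR V0.4S, V1.4S, #7 is a right shift on 32-bit elements,
     so immh:immb = 64 - 7 = 57 (0b0111001), i.e. immh = 0111, immb = 001.  */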
  return TRUE;
}

/* Insert fields for e.g. the immediate operands in
   BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
bfd_boolean
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  insert_all_fields (self, code, imm);
  return TRUE;
}
/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bfd_boolean
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return TRUE;
}
/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bfd_boolean
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};
  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
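      /* For example, MOVI D0, #0xff00ff00ff00ff00 shrinks to the imm8
	 value 0b10101010 (0xaa): bit n of imm8 is set iff byte n of the
	 64-bit immediate is 0xff.  */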
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return TRUE;
  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return TRUE;
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
    }
  insert_field_2 (&field, code, amount, 0);
  return TRUE;
}
/* Insert fields for an 8-bit floating-point immediate.  */
bfd_boolean
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return TRUE;
}
/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bfd_boolean
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
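  /* For example, SCVTF <Dd>, <Wn>, #10 encodes 64 - 10 = 54; the decoder
     recovers <fbits> as 64 minus the field value.  */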
  return TRUE;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bfd_boolean
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return TRUE;
}
/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bfd_boolean
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bfd_boolean invert_p)
{
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                 self->fields[0]);
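  /* For example, ORR X0, X1, #0x1 is a single set bit in a 64-bit element,
     which encodes as N = 1, immr = 0b000000, imms = 0b000000.  */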
  return TRUE;
}

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
bfd_boolean
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC);
}
/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bfd_boolean
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst)
{
  return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
}
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bfd_boolean
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: assert (0);
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc1:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return TRUE;
}
/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bfd_boolean
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return TRUE;
}
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bfd_boolean
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven encoding.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
       <amount> must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return TRUE;
}
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bfd_boolean
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* Scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
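  /* For example, LDP X0, X1, [X2, #16] stores a scaled immediate: the
     element size is 8, so imm7 holds 16 >> 3 = 2.  */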
  /* pre/post- index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }

  return TRUE;
}
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
bfd_boolean
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
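  /* For example, LDR X0, [X1, #8] has shift = 3 (8-byte element), so the
     unsigned immediate field holds 8 >> 3 = 1.  */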
  return TRUE;
}

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bfd_boolean
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return TRUE;
}
/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bfd_boolean
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return TRUE;
}
/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bfd_boolean
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return TRUE;
}
/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bfd_boolean
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);
  return TRUE;
}
/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bfd_boolean
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return TRUE;
}
/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
bfd_boolean
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return TRUE;
}
/* Encode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
bfd_boolean
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return TRUE;
}
/* Encode the hint number for instructions that alias HINT but take an
   operand.  */
bfd_boolean
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return TRUE;
}
/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bfd_boolean
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return TRUE;
}
/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
bfd_boolean
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

  return TRUE;
}
/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}
/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}
/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
                 2, FLD_imm3, FLD_SVE_imm6);
  return TRUE;
}
/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}
/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return TRUE;
}
/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bfd_boolean
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return TRUE;
}
/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bfd_boolean
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static bfd_boolean
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return TRUE;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}
/* Encode an SVE ADD/SUB immediate.  */
bfd_boolean
aarch64_ins_sve_aimm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
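  /* For example, ADD Z0.H, Z0.H, #512 takes the second branch above:
     512 / 256 = 2 is encoded with bit 8 set (the shifted form), giving
     sh = 1 and imm8 = 2.  */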
  return TRUE;
}

/* Encode an SVE CPY/DUP immediate.  */
bfd_boolean
aarch64_ins_sve_asimm (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst)
{
  return aarch64_ins_sve_aimm (self, info, code, inst);
}
/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bfd_boolean
aarch64_ins_sve_index (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
                 2, FLD_imm5, FLD_SVE_tszh);
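  /* For example, DUP Z0.H, Z1.H[2] has esize 2, so the encoded value is
     (2 * 2 + 1) * 2 = 10 (0b0001010): imm5 = 01010 and SVE_tszh = 00.  */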
  return TRUE;
}

/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bfd_boolean
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst)
{
  return aarch64_ins_limm (self, info, code, inst);
}
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bfd_boolean
aarch64_ins_sve_reglist (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return TRUE;
}
/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bfd_boolean
aarch64_ins_sve_scale (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return TRUE;
}
/* Encode an SVE shift left immediate.  */
bfd_boolean
aarch64_ins_sve_shlimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
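  /* For example, LSL Z0.B, Z0.B, #3 has esize 1, so the encoded value is
     8 * 1 + 3 = 11 (0b0001011): tsz = 0001 selects .B and imm3 = 011 is
     the shift amount.  */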
  return TRUE;
}

/* Encode an SVE shift right immediate.  */
bfd_boolean
aarch64_ins_sve_shrimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
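  /* For example, ASR Z0.S, Z0.S, #5 has esize 4, so the encoded value is
     16 * 4 - 5 = 59 (0b0111011): tsz = 0111 selects .S and the decoder
     recovers the shift as 64 - 59 = 5.  */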
  return TRUE;
}

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value = 0;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
/* Encode size[0], i.e. bit 22, for
     e.g. FCVTXN <Vb><d>, <Va><n>.  */
static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}
/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: assert (0);
    }
  insert_field_2 (&field, &inst->value, val, 0);
}
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */
static void
do_misc_encoding (aarch64_inst *inst)
{
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    default:
      break;
    }
}
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not an arbitrary operand, but one of the operands that has
   enough information for such an encoding.  */
static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
        default: assert (0);
        }
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        /* Otherwise use the result operand, which has to be an integer
           register.  */
        idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
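  /* For example, ROR W0, W1, #3 becomes EXTR W0, W1, W1, #3.  */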
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}
/* LSR <Xd>, <Xn>, #<shift>
     is equivalent to:
   UBFM <Xd>, <Xn>, #<shift>, #63.  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
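  /* For example, LSR X0, X1, #8 becomes UBFM X0, X1, #8, #63.  */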
}

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
       is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}
/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
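  /* For example, SBFX X0, X1, #8, #16 becomes SBFM X0, X1, #8, #23.  */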
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
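      /* For example, SBFIZ X0, X1, #4, #8 becomes SBFM X0, X1, #60, #7
	 via this branch.  */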
    }
}

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
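      /* For example, LSL X0, X1, #8 becomes UBFM X0, X1, #56, #55 via
	 this branch.  */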
    }
}

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
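  /* For example, CINC W0, W1, EQ becomes CSINC W0, W1, W1, NE.  */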
}

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
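  /* For example, CSET W0, EQ becomes CSINC W0, WZR, WZR, NE.  */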
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
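  /* For example, MOV X0, #0x10000 becomes MOVZ X0, #0x1, LSL #16.  */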
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */
static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
/* Some alias opcodes are assembled by being converted to their real-form.  */
static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}
/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */
bfd_boolean
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;
  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return FALSE;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;
  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_finished;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;
    }
  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
    }
  {
    aarch64_opnd_info *info = inst->operands;

    /* Call the inserter of each operand.  */
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
      {
        const aarch64_operand *opnd;
        enum aarch64_opnd type = opcode->operands[i];
        if (type == AARCH64_OPND_NIL)
          break;
        if (info->skip)
          {
            DEBUG_TRACE ("skip the incomplete operand %d", i);
            continue;
          }
        opnd = &aarch64_operands[type];
        if (operand_has_inserter (opnd))
          aarch64_insert_operand (opnd, info, &inst->value, inst);
      }
  }
  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

 encoding_finished:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return TRUE;
}