1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order that the least significant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
/* NOTE(review): this excerpt is elided — the return type, braces and the
   va_start/loop/va_end scaffolding around the statements below are not
   visible here; the visible statements are kept verbatim.  */
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
47 num = va_arg (va, uint32_t)
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
/* Shift out the bits just consumed so the next field gets the next
   more-significant slice of VALUE.  */
54 value >>= field->width;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
67 enum aarch64_field_kind kind;
/* Walk SELF->fields from the last entry back to the first, skipping
   unused (FLD_NIL) slots.  */
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
/* Consume the bits just inserted before moving to the next field.  */
74 value >>= fields[kind].width;
78 /* Operand inserters. */
80 /* Insert register number. */
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
86 insert_field (self->fields[0], code, info->reg.regno, 0);
90 /* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
94 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
/* The index encoding differs per instruction class; each branch below
   handles one class.  */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
/* POS is the bit position selected by the element size qualifier.  */
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
/* imm5<pos> marks the element size; the index occupies the bits above.  */
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
124 else if (inst->opcode->iclass == dotproduct)
126 unsigned reglane_index = info->reglane.index;
127 switch (info->qualifier)
129 case AARCH64_OPND_QLF_S_B:
131 assert (reglane_index < 4);
/* Two-bit index encoded in L:H.  */
132 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
138 else if (inst->opcode->iclass == cryptosm3)
140 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
141 unsigned reglane_index = info->reglane.index;
142 assert (reglane_index < 4);
143 insert_field (FLD_SM3_imm2, code, reglane_index, 0);
147 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
148 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
149 unsigned reglane_index = info->reglane.index;
/* NOTE(review): lines adjusting REGLANE_INDEX for the complex (two-element)
   FCMLA case appear to be elided from this excerpt.  */
151 if (inst->opcode->op == OP_FCMLA_ELEM)
152 /* Complex operand takes two elements. */
/* Index field layout depends on element size: H:L:M, L:H or H alone.  */
155 switch (info->qualifier)
157 case AARCH64_OPND_QLF_S_H:
159 assert (reglane_index < 8);
160 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
162 case AARCH64_OPND_QLF_S_S:
164 assert (reglane_index < 4);
165 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
167 case AARCH64_OPND_QLF_S_D:
169 assert (reglane_index < 2);
170 insert_field (FLD_H, code, reglane_index, 0);
179 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
181 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
183 const aarch64_inst *inst ATTRIBUTE_UNUSED)
186 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
/* len is the number of registers minus one.  */
188 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
192 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
193 in AdvSIMD load/store instructions. */
195 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
196 const aarch64_opnd_info *info, aarch64_insn *code,
197 const aarch64_inst *inst)
199 aarch64_insn value = 0;
200 /* Number of elements in each structure to be loaded/stored. */
201 unsigned num = get_opcode_dependent_value (inst->opcode);
204 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
/* NOTE(review): the switch on NUM that selects between the two encodings
   below is elided from this excerpt; the opcode values map register-list
   length to the architectural "opcode" field.  */
209 switch (info->reglist.num_regs)
211 case 1: value = 0x7; break;
212 case 2: value = 0xa; break;
213 case 3: value = 0x6; break;
214 case 4: value = 0x2; break;
219 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
230 insert_field (FLD_opcode, code, value, 0);
235 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
236 single structure to all lanes instructions. */
238 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
239 const aarch64_opnd_info *info, aarch64_insn *code,
240 const aarch64_inst *inst)
243 /* The opcode dependent area stores the number of elements in
244 each structure to be loaded/stored. */
245 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
248 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
/* S defaults to 0; only the LD1R two-register form sets it.  */
250 value = (aarch64_insn) 0;
251 if (is_ld1r && info->reglist.num_regs == 2)
252 /* OP_LD1R does not have alternating variant, but have "two consecutive"
254 value = (aarch64_insn) 1;
255 insert_field (FLD_S, code, value, 0);
260 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
261 operand e.g. Vt in AdvSIMD load/store single element instructions. */
263 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
264 const aarch64_opnd_info *info, aarch64_insn *code,
265 const aarch64_inst *inst ATTRIBUTE_UNUSED)
267 aarch64_field field = {0, 0};
268 aarch64_insn QSsize = 0; /* fields Q:S:size. */
269 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
271 assert (info->reglist.has_index);
274 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
275 /* Encode the index, opcode<2:1> and size. */
/* The element index is packed into the Q:S:size bits; how many of those
   bits it occupies depends on the element size qualifier.  NOTE(review):
   the assignments to OPCODEH2 per case appear to be elided here.  */
276 switch (info->qualifier)
278 case AARCH64_OPND_QLF_S_B:
279 /* Index encoded in "Q:S:size". */
280 QSsize = info->reglist.index;
283 case AARCH64_OPND_QLF_S_H:
284 /* Index encoded in "Q:S:size<1>". */
285 QSsize = info->reglist.index << 1;
288 case AARCH64_OPND_QLF_S_S:
289 /* Index encoded in "Q:S". */
290 QSsize = info->reglist.index << 2;
293 case AARCH64_OPND_QLF_S_D:
294 /* Index encoded in "Q". */
295 QSsize = info->reglist.index << 3 | 0x1;
301 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
/* opcode<2:1> lives in a sub-field of the asisdlso opcode field.  */
302 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
303 insert_field_2 (&field, code, opcodeh2, 0);
308 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
309 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
310 or SSHR <V><d>, <V><n>, #<shift>. */
312 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
313 const aarch64_opnd_info *info,
314 aarch64_insn *code, const aarch64_inst *inst)
316 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
319 if (inst->opcode->iclass == asimdshf)
/* For the vector form, bit 0 of the standard qualifier value supplies Q.  */
323 0000 x SEE AdvSIMD modified immediate
332 Q = (val & 0x1) ? 1 : 0;
333 insert_field (FLD_Q, code, Q, inst->opcode->mask);
337 assert (info->type == AARCH64_OPND_IMM_VLSR
338 || info->type == AARCH64_OPND_IMM_VLSL);
340 if (info->type == AARCH64_OPND_IMM_VLSR)
/* Right shifts encode as (element-size*2 - shift).  */
343 0000 SEE AdvSIMD modified immediate
344 0001 (16-UInt(immh:immb))
345 001x (32-UInt(immh:immb))
346 01xx (64-UInt(immh:immb))
347 1xxx (128-UInt(immh:immb)) */
348 imm = (16 << (unsigned)val) - info->imm.value;
/* Left shifts encode as (element-size + shift).  */
352 0000 SEE AdvSIMD modified immediate
353 0001 (UInt(immh:immb)-8)
354 001x (UInt(immh:immb)-16)
355 01xx (UInt(immh:immb)-32)
356 1xxx (UInt(immh:immb)-64) */
357 imm = info->imm.value + (8 << (unsigned)val);
358 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
363 /* Insert fields for e.g. the immediate operands in
364 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
366 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
372 imm = info->imm.value;
/* NOTE(review): the shift applied when the operand is scaled by two is
   elided from this excerpt.  */
373 if (operand_need_shift_by_two (self))
375 insert_all_fields (self, code, imm);
379 /* Insert immediate and its shift amount for e.g. the last operand in
380 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
382 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
383 aarch64_insn *code, const aarch64_inst *inst)
386 aarch64_ins_imm (self, info, code, inst);
/* hw encodes the LSL amount in multiples of 16.  */
388 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
392 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
393 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
395 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
396 const aarch64_opnd_info *info,
398 const aarch64_inst *inst ATTRIBUTE_UNUSED)
400 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
401 uint64_t imm = info->imm.value;
402 enum aarch64_modifier_kind kind = info->shifter.kind;
403 int amount = info->shifter.amount;
404 aarch64_field field = {0, 0};
406 /* a:b:c:d:e:f:g:h */
407 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
409 /* Either MOVI <Dd>, #<imm>
410 or MOVI <Vd>.2D, #<imm>.
411 <imm> is a 64-bit immediate
412 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
413 encoded in "a:b:c:d:e:f:g:h". */
414 imm = aarch64_shrink_expanded_imm8 (imm);
/* The shrunk value must fit in 8 bits; a negative result means the
   expanded immediate was not byte-replicable.  */
415 assert ((int)imm >= 0);
417 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
419 if (kind == AARCH64_MOD_NONE)
422 /* shift amount partially in cmode */
423 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
424 if (kind == AARCH64_MOD_LSL)
426 /* AARCH64_MOD_LSL: shift zeros. */
427 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
428 assert (esize == 4 || esize == 2 || esize == 1);
429 /* For 8-bit move immediate, the optional LSL #0 does not require
/* cmode sub-field width depends on element size (word vs halfword).  */
435 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
437 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
441 /* AARCH64_MOD_MSL: shift ones. */
443 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
/* NOTE(review): the conversion of AMOUNT from a bit count to the encoded
   value (amount >>= ...) appears to be elided from this excerpt.  */
445 insert_field_2 (&field, code, amount, 0);
450 /* Insert fields for an 8-bit floating-point immediate. */
452 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
454 const aarch64_inst *inst ATTRIBUTE_UNUSED)
456 insert_all_fields (self, code, info->imm.value);
460 /* Insert 1-bit rotation immediate (#90 or #270). */
462 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
463 const aarch64_opnd_info *info,
464 aarch64_insn *code, const aarch64_inst *inst)
/* Map 90 -> 0 and 270 -> 1.  */
466 uint64_t rot = (info->imm.value - 90) / 180;
468 insert_field (self->fields[0], code, rot, inst->opcode->mask);
472 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
474 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
475 const aarch64_opnd_info *info,
476 aarch64_insn *code, const aarch64_inst *inst)
/* Map 0/90/180/270 -> 0..3.  */
478 uint64_t rot = info->imm.value / 90;
480 insert_field (self->fields[0], code, rot, inst->opcode->mask);
484 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
485 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
487 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
489 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The scale field encodes 64 minus the number of fractional bits.  */
491 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
495 /* Insert arithmetic immediate for e.g. the last operand in
496 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
498 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
499 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* shift: 1 iff the optional LSL #12 is present.  */
502 aarch64_insn value = info->shifter.amount ? 1 : 0;
503 insert_field (self->fields[0], code, value, 0);
504 /* imm12 (unsigned) */
505 insert_field (self->fields[1], code, info->imm.value, 0);
509 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
510 the operand should be inverted before encoding. */
512 aarch64_ins_limm_1 (const aarch64_operand *self,
513 const aarch64_opnd_info *info, aarch64_insn *code,
514 const aarch64_inst *inst, bfd_boolean invert_p)
517 uint64_t imm = info->imm.value;
518 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
/* NOTE(review): the inversion of IMM under INVERT_P appears to be elided
   from this excerpt.  */
522 /* The constraint check should have guaranteed this wouldn't happen. */
523 assert (aarch64_logical_immediate_p (imm, esize, &value));
/* VALUE holds the N:immr:imms encoding produced above.  */
525 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
530 /* Insert logical/bitmask immediate for e.g. the last operand in
531 ORR <Wd|WSP>, <Wn>, #<imm>. */
533 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
534 aarch64_insn *code, const aarch64_inst *inst)
/* BIC is an alias whose immediate is stored inverted.  */
536 return aarch64_ins_limm_1 (self, info, code, inst,
537 inst->opcode->op == OP_BIC);
540 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
542 aarch64_ins_inv_limm (const aarch64_operand *self,
543 const aarch64_opnd_info *info, aarch64_insn *code,
544 const aarch64_inst *inst)
/* Always invert before encoding.  */
546 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
549 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
550 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
552 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
553 aarch64_insn *code, const aarch64_inst *inst)
555 aarch64_insn value = 0;
557 assert (info->idx == 0);
/* Rt itself is encoded like an ordinary register number.  */
560 aarch64_ins_regno (self, info, code, inst);
561 if (inst->opcode->iclass == ldstpair_indexed
562 || inst->opcode->iclass == ldstnapair_offs
563 || inst->opcode->iclass == ldstpair_off
564 || inst->opcode->iclass == loadlit)
/* Pair/literal classes encode the access size in a 2-bit field.  */
567 switch (info->qualifier)
569 case AARCH64_OPND_QLF_S_S: value = 0; break;
570 case AARCH64_OPND_QLF_S_D: value = 1; break;
571 case AARCH64_OPND_QLF_S_Q: value = 2; break;
574 insert_field (FLD_ldst_size, code, value, 0);
/* Other classes use the standard qualifier value split across
   opc1:ldst_size.  */
579 value = aarch64_get_qualifier_standard_value (info->qualifier);
580 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
586 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
588 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
589 const aarch64_opnd_info *info, aarch64_insn *code,
590 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Only the base register needs encoding; the offset is fixed at #0.  */
593 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
597 /* Encode the address operand for e.g.
598 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
600 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
601 const aarch64_opnd_info *info, aarch64_insn *code,
602 const aarch64_inst *inst ATTRIBUTE_UNUSED)
605 enum aarch64_modifier_kind kind = info->shifter.kind;
608 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
610 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
612 if (kind == AARCH64_MOD_LSL)
613 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
614 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
/* S: whether the optional shift amount is applied.  */
616 if (info->qualifier != AARCH64_OPND_QLF_S_B)
617 S = info->shifter.amount != 0;
619 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
623 Must be #0 if <extend> is explicitly LSL. */
624 S = info->shifter.operator_present && info->shifter.amount_present;
625 insert_field (FLD_S, code, S, 0);
630 /* Encode the address operand for e.g.
631 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
633 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
634 const aarch64_opnd_info *info, aarch64_insn *code,
635 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Base register.  */
638 insert_field (self->fields[0], code, info->addr.base_regno, 0);
/* Signed immediate offset.  */
641 int imm = info->addr.offset.imm;
642 insert_field (self->fields[1], code, imm, 0);
/* Write-back bit: only the pre-index form is valid here.  */
645 if (info->addr.writeback)
647 assert (info->addr.preind == 1 && info->addr.postind == 0);
648 insert_field (self->fields[2], code, 1, 0);
653 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
655 aarch64_ins_addr_simm (const aarch64_operand *self,
656 const aarch64_opnd_info *info,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED)
663 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
664 /* simm (imm9 or imm7) */
665 imm = info->addr.offset.imm;
666 if (self->fields[0] == FLD_imm7)
667 /* scaled immediate in ld/st pair instructions.. */
668 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
669 insert_field (self->fields[0], code, imm, 0);
670 /* pre/post- index */
671 if (info->addr.writeback)
/* Write-back is invalid for these instruction classes.  */
673 assert (inst->opcode->iclass != ldst_unscaled
674 && inst->opcode->iclass != ldstnapair_offs
675 && inst->opcode->iclass != ldstpair_off
676 && inst->opcode->iclass != ldst_unpriv);
/* Exactly one of pre-/post-index must be set when writing back.  */
677 assert (info->addr.preind != info->addr.postind);
678 if (info->addr.preind)
679 insert_field (self->fields[1], code, 1, 0);
685 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
687 aarch64_ins_addr_simm10 (const aarch64_operand *self,
688 const aarch64_opnd_info *info,
690 const aarch64_inst *inst ATTRIBUTE_UNUSED)
695 insert_field (self->fields[0], code, info->addr.base_regno, 0);
/* 10-bit immediate scaled by 8, split into a top bit and a 9-bit field.  */
697 imm = info->addr.offset.imm >> 3;
698 insert_field (self->fields[1], code, imm >> 9, 0);
699 insert_field (self->fields[2], code, imm, 0);
/* Write-back: pre-index only.  */
701 if (info->addr.writeback)
703 assert (info->addr.preind == 1 && info->addr.postind == 0);
704 insert_field (self->fields[3], code, 1, 0);
709 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
711 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
712 const aarch64_opnd_info *info,
714 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The unsigned offset is scaled down by the access size.  */
716 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
719 insert_field (self->fields[0], code, info->addr.base_regno, 0);
721 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
725 /* Encode the address operand for e.g.
726 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
728 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
729 const aarch64_opnd_info *info, aarch64_insn *code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED)
733 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
/* Register post-index uses Rm; an immediate post-index is signalled by
   Rm == 0b11111.  */
735 if (info->addr.offset.is_reg)
736 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
738 insert_field (FLD_Rm, code, 0x1f, 0);
742 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
744 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
745 const aarch64_opnd_info *info, aarch64_insn *code,
746 const aarch64_inst *inst ATTRIBUTE_UNUSED)
749 insert_field (FLD_cond, code, info->cond->value, 0);
753 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
755 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
756 const aarch64_opnd_info *info, aarch64_insn *code,
757 const aarch64_inst *inst ATTRIBUTE_UNUSED)
759 /* op0:op1:CRn:CRm:op2 */
/* Fields are passed least-significant first, per insert_fields.  */
760 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
761 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
765 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
767 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
768 const aarch64_opnd_info *info, aarch64_insn *code,
769 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* NOTE(review): the field-kind arguments to this call are elided from
   this excerpt.  */
772 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
777 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
779 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
780 const aarch64_opnd_info *info, aarch64_insn *code,
781 const aarch64_inst *inst ATTRIBUTE_UNUSED)
783 /* op1:CRn:CRm:op2 */
784 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
785 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
789 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
792 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
793 const aarch64_opnd_info *info, aarch64_insn *code,
794 const aarch64_inst *inst ATTRIBUTE_UNUSED)
797 insert_field (FLD_CRm, code, info->barrier->value, 0);
801 /* Encode the prefetch operation option operand for e.g.
802 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
805 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
806 const aarch64_opnd_info *info, aarch64_insn *code,
807 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The prefetch operation is encoded in the Rt slot.  */
810 insert_field (FLD_Rt, code, info->prfop->value, 0);
814 /* Encode the hint number for instructions that alias HINT but take an
818 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
819 const aarch64_opnd_info *info, aarch64_insn *code,
820 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Hint value split across CRm (low) and op2 (high).  */
823 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
827 /* Encode the extended register operand for e.g.
828 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
830 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
831 const aarch64_opnd_info *info, aarch64_insn *code,
832 const aarch64_inst *inst ATTRIBUTE_UNUSED)
834 enum aarch64_modifier_kind kind;
837 insert_field (FLD_Rm, code, info->reg.regno, 0);
/* LSL is an alias for UXTW/UXTX depending on the register width.  */
839 kind = info->shifter.kind;
840 if (kind == AARCH64_MOD_LSL)
841 kind = info->qualifier == AARCH64_OPND_QLF_W
842 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
843 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
/* imm3 holds the extend/shift amount.  */
845 insert_field (FLD_imm3, code, info->shifter.amount, 0);
850 /* Encode the shifted register operand for e.g.
851 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
853 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
854 const aarch64_opnd_info *info, aarch64_insn *code,
855 const aarch64_inst *inst ATTRIBUTE_UNUSED)
858 insert_field (FLD_Rm, code, info->reg.regno, 0);
/* The shift kind (LSL/LSR/ASR/ROR) and its amount.  */
860 insert_field (FLD_shift, code,
861 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
863 insert_field (FLD_imm6, code, info->shifter.amount, 0);
868 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
869 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
870 SELF's operand-dependent value. fields[0] specifies the field that
871 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
873 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
874 const aarch64_opnd_info *info,
876 const aarch64_inst *inst ATTRIBUTE_UNUSED)
878 int factor = 1 + get_operand_specific_data (self);
879 insert_field (self->fields[0], code, info->addr.base_regno, 0);
/* Offset is stored pre-divided by FACTOR.  */
880 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
884 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
885 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
886 SELF's operand-dependent value. fields[0] specifies the field that
887 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
889 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
890 const aarch64_opnd_info *info,
892 const aarch64_inst *inst ATTRIBUTE_UNUSED)
894 int factor = 1 + get_operand_specific_data (self);
895 insert_field (self->fields[0], code, info->addr.base_regno, 0);
/* Offset is stored pre-divided by FACTOR.  */
896 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
900 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
901 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
902 SELF's operand-dependent value. fields[0] specifies the field that
903 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
904 and imm3 fields, with imm3 being the less-significant part. */
906 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
907 const aarch64_opnd_info *info,
909 const aarch64_inst *inst ATTRIBUTE_UNUSED)
911 int factor = 1 + get_operand_specific_data (self);
912 insert_field (self->fields[0], code, info->addr.base_regno, 0);
/* 9-bit offset split across imm3 (low) and SVE_imm6 (high).  */
913 insert_fields (code, info->addr.offset.imm / factor, 0,
914 2, FLD_imm3, FLD_SVE_imm6);
918 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
919 is a 4-bit signed number and where <shift> is SELF's operand-dependent
920 value. fields[0] specifies the base register field. */
922 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
923 const aarch64_opnd_info *info, aarch64_insn *code,
924 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* FACTOR is the byte scale implied by the operand-specific shift.  */
926 int factor = 1 << get_operand_specific_data (self);
927 insert_field (self->fields[0], code, info->addr.base_regno, 0);
928 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
932 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
933 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
934 value. fields[0] specifies the base register field. */
936 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
937 const aarch64_opnd_info *info, aarch64_insn *code,
938 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* FACTOR is the byte scale implied by the operand-specific shift.  */
940 int factor = 1 << get_operand_specific_data (self);
941 insert_field (self->fields[0], code, info->addr.base_regno, 0);
942 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
946 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
947 is SELF's operand-dependent value. fields[0] specifies the base
948 register field and fields[1] specifies the offset register field. */
950 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
951 const aarch64_opnd_info *info, aarch64_insn *code,
952 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The shift amount is fixed by the operand kind, so only the two
   registers need encoding.  */
954 insert_field (self->fields[0], code, info->addr.base_regno, 0);
955 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
959 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
960 <shift> is SELF's operand-dependent value. fields[0] specifies the
961 base register field, fields[1] specifies the offset register field and
962 fields[2] is a single-bit field that selects SXTW over UXTW. */
964 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
965 const aarch64_opnd_info *info, aarch64_insn *code,
966 const aarch64_inst *inst ATTRIBUTE_UNUSED)
968 insert_field (self->fields[0], code, info->addr.base_regno, 0);
969 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
/* fields[2]: 0 = UXTW, 1 = SXTW.  */
970 if (info->shifter.kind == AARCH64_MOD_UXTW)
971 insert_field (self->fields[2], code, 0, 0);
973 insert_field (self->fields[2], code, 1, 0);
977 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
978 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
979 fields[0] specifies the base register field. */
981 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
982 const aarch64_opnd_info *info, aarch64_insn *code,
983 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* FACTOR is the byte scale implied by the operand-specific shift.  */
985 int factor = 1 << get_operand_specific_data (self);
986 insert_field (self->fields[0], code, info->addr.base_regno, 0);
987 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
991 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
992 where <modifier> is fixed by the instruction and where <msz> is a
993 2-bit unsigned number. fields[0] specifies the base register field
994 and fields[1] specifies the offset register field. */
996 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
997 const aarch64_opnd_info *info, aarch64_insn *code)
999 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1000 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
/* msz holds the shift amount common to all zz addressing forms.  */
1001 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1005 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1006 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1007 field and fields[1] specifies the offset register field. */
1009 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1010 const aarch64_opnd_info *info, aarch64_insn *code,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Thin wrapper: the shared zz encoder handles all the fields.  */
1013 return aarch64_ext_sve_addr_zz (self, info, code);
1016 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1017 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1018 field and fields[1] specifies the offset register field. */
1020 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1021 const aarch64_opnd_info *info,
1023 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Thin wrapper: the shared zz encoder handles all the fields.  */
1025 return aarch64_ext_sve_addr_zz (self, info, code);
1028 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1029 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1030 field and fields[1] specifies the offset register field. */
1032 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1033 const aarch64_opnd_info *info,
1035 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Thin wrapper: the shared zz encoder handles all the fields.  */
1037 return aarch64_ext_sve_addr_zz (self, info, code);
1040 /* Encode an SVE ADD/SUB immediate. */
1042 aarch64_ins_sve_aimm (const aarch64_operand *self,
1043 const aarch64_opnd_info *info, aarch64_insn *code,
1044 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Bit 8 of the encoded value selects the LSL #8 form; the low byte holds
   the (possibly pre-scaled) immediate.  */
1046 if (info->shifter.amount == 8)
1047 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1048 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
/* A multiple of 256 is encoded shifted with the LSL #8 bit set.  */
1049 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1051 insert_all_fields (self, code, info->imm.value & 0xff);
1055 /* Encode an SVE CPY/DUP immediate. */
1057 aarch64_ins_sve_asimm (const aarch64_operand *self,
1058 const aarch64_opnd_info *info, aarch64_insn *code,
1059 const aarch64_inst *inst)
/* Same encoding as the ADD/SUB immediate.  */
1061 return aarch64_ins_sve_aimm (self, info, code, inst);
1064 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1065 array specifies which field to use for Zn. MM is encoded in the
1066 concatenation of imm5 and SVE_tszh, with imm5 being the less
1067 significant part. */
1069 aarch64_ins_sve_index (const aarch64_operand *self,
1070 const aarch64_opnd_info *info, aarch64_insn *code,
1071 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1073 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1074 insert_field (self->fields[0], code, info->reglane.regno, 0);
/* Triangular encoding: (index*2 + 1) * esize packs both the element size
   and the lane index into tszh:imm5.  */
1075 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1076 2, FLD_imm5, FLD_SVE_tszh);
1080 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1082 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1083 const aarch64_opnd_info *info, aarch64_insn *code,
1084 const aarch64_inst *inst)
/* Reuses the generic logical-immediate encoder.  */
1086 return aarch64_ins_limm (self, info, code, inst);
1089 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1090 and where MM occupies the most-significant part. The operand-dependent
1091 value specifies the number of bits in Zn. */
1093 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1094 const aarch64_opnd_info *info, aarch64_insn *code,
1095 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1097 unsigned int reg_bits = get_operand_specific_data (self);
1098 assert (info->reglane.regno < (1U << reg_bits));
/* Pack index above the register number.  */
1099 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1100 insert_all_fields (self, code, val);
1104 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
/* Only the first register of the list is encoded; the length is implied
   by the opcode.  */
1107 aarch64_ins_sve_reglist (const aarch64_operand *self,
1108 const aarch64_opnd_info *info, aarch64_insn *code,
1109 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1111 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1115 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1116 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1119 aarch64_ins_sve_scale (const aarch64_operand *self,
1120 const aarch64_opnd_info *info, aarch64_insn *code,
1121 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* <pattern> goes in the operand's own fields...  */
1123 insert_all_fields (self, code, info->imm.value);
/* ...and the multiplier is biased by one so that MUL #1..#16 fits in
   the 4-bit SVE_imm4 field.  */
1124 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1128 /* Encode an SVE shift left immediate. */
1130 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1131 const aarch64_opnd_info *info, aarch64_insn *code,
1132 const aarch64_inst *inst)
1134 const aarch64_opnd_info *prev_operand;
/* The element size comes from the preceding (vector) operand's
   qualifier, so this operand cannot be the first one.  */
1137 assert (info->idx > 0);
1138 prev_operand = &inst->operands[info->idx - 1];
1139 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
/* Left shifts are encoded as (esize in bits) + shift amount.  */
1140 insert_all_fields (self, code, 8 * esize + info->imm.value);
1144 /* Encode an SVE shift right immediate. */
1146 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1147 const aarch64_opnd_info *info, aarch64_insn *code,
1148 const aarch64_inst *inst)
1150 const aarch64_opnd_info *prev_operand;
/* As for shlimm, the element size is taken from the previous operand.  */
1153 assert (info->idx > 0);
1154 prev_operand = &inst->operands[info->idx - 1];
1155 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
/* Right shifts are encoded as (2 * esize in bits) - shift amount,
   i.e. the mirror image of the left-shift encoding.  */
1156 insert_all_fields (self, code, 16 * esize - info->imm.value);
1160 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1161 The fields array specifies which field to use. */
1163 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1164 const aarch64_opnd_info *info,
1166 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* 0x3f000000 is the IEEE-754 single-precision bit pattern for 0.5;
   encode 0 for #0.5 and 1 for #1.0.  */
1168 if (info->imm.value == 0x3f000000)
1169 insert_field (self->fields[0], code, 0, 0);
1171 insert_field (self->fields[0], code, 1, 0);
1175 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1176 The fields array specifies which field to use. */
1178 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1179 const aarch64_opnd_info *info,
1181 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* 0x3f000000 is the single-precision bit pattern for 0.5; any other
   accepted value (i.e. 2.0) encodes as 1.  */
1183 if (info->imm.value == 0x3f000000)
1184 insert_field (self->fields[0], code, 0, 0);
1186 insert_field (self->fields[0], code, 1, 0);
1190 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1191 The fields array specifies which field to use. */
1193 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1194 const aarch64_opnd_info *info,
1196 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* 0.0 has an all-zero bit pattern, so a plain integer compare works.  */
1198 if (info->imm.value == 0)
1199 insert_field (self->fields[0], code, 0, 0);
1201 insert_field (self->fields[0], code, 1, 0);
1205 /* Miscellaneous encoding functions. */
1207 /* Encode size[0], i.e. bit 22, for
1208 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1211 encode_asimd_fcvt (aarch64_inst *inst)
1214 aarch64_field field = {0, 0};
1215 enum aarch64_opnd_qualifier qualifier;
/* Pick the operand whose qualifier identifies the wider (<Ta>)
   arrangement; which operand that is depends on the direction of the
   conversion.  */
1217 switch (inst->opcode->op)
1221 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1222 qualifier = inst->operands[1].qualifier;
1226 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1227 qualifier = inst->operands[0].qualifier;
1232 assert (qualifier == AARCH64_OPND_QLF_V_4S
1233 || qualifier == AARCH64_OPND_QLF_V_2D);
/* size[0] is 0 for the 4S (single) form and 1 for the 2D (double) form.  */
1234 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
/* Build a sub-field covering just bit 0 of FLD_size, i.e. bit 22.  */
1235 gen_sub_field (FLD_size, 0, 1, &field);
1236 insert_field_2 (&field, &inst->value, value, 0);
1239 /* Encode size[0], i.e. bit 22, for
1240 e.g. FCVTXN <Vb><d>, <Va><n>. */
1243 encode_asisd_fcvtxn (aarch64_inst *inst)
/* The scalar FCVTXN only has the double-to-single form, so bit 22 is
   always 1.  */
1245 aarch64_insn val = 1;
1246 aarch64_field field = {0, 0};
1247 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1248 gen_sub_field (FLD_size, 0, 1, &field);
1249 insert_field_2 (&field, &inst->value, val, 0);
1252 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1254 encode_fcvt (aarch64_inst *inst)
/* 'opc' is a 2-bit field starting at bit 15.  */
1257 const aarch64_field field = {15, 2};
/* The destination qualifier selects the target precision:
   single = 0, double = 1, half = 3 (2 is unallocated).  */
1260 switch (inst->operands[0].qualifier)
1262 case AARCH64_OPND_QLF_S_S: val = 0; break;
1263 case AARCH64_OPND_QLF_S_D: val = 1; break;
1264 case AARCH64_OPND_QLF_S_H: val = 3; break;
1267 insert_field_2 (&field, &inst->value, val, 0);
1272 /* Return the index in qualifiers_list that INST is using. Should only
1273 be called once the qualifiers are known to be valid. */
1276 aarch64_get_variant (struct aarch64_inst *inst)
1278 int i, nops, variant;
1280 nops = aarch64_num_of_operands (inst->opcode);
/* Try each candidate qualifier sequence in turn; the first sequence
   whose qualifiers all match the instruction's operands is the one
   in use.  */
1281 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1283 for (i = 0; i < nops; ++i)
1284 if (inst->opcode->qualifiers_list[variant][i]
1285 != inst->operands[i].qualifier)
1293 /* Do miscellaneous encodings that are not common enough to be driven by
1297 do_misc_encoding (aarch64_inst *inst)
/* Dispatch on the opcode enum; each case handles one instruction (or
   alias) that needs a field copied or filled in by hand.  */
1301 switch (inst->opcode->op)
1310 encode_asimd_fcvt (inst);
1313 encode_asisd_fcvtxn (inst);
1317 /* Copy Pn to Pm and Pg. */
1318 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1319 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1320 insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
1323 /* Copy Zd to Zm. */
1324 value = extract_field (FLD_SVE_Zd, inst->value, 0);
1325 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1328 /* Fill in the zero immediate. */
/* The single set bit in imm5:SVE_tszh encodes the element size chosen
   by the qualifier variant.  */
1329 insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
1330 2, FLD_imm5, FLD_SVE_tszh)
1333 /* Copy Zn to Zm. */
1334 value = extract_field (FLD_SVE_Zn, inst->value, 0);
1335 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1340 /* Copy Pd to Pm. */
1341 value = extract_field (FLD_SVE_Pd, inst->value, 0);
1342 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1344 case OP_MOVZS_P_P_P:
1346 /* Copy Pn to Pm. */
1347 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1348 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1350 case OP_NOTS_P_P_P_Z:
1351 case OP_NOT_P_P_P_Z:
1352 /* Copy Pg to Pm. */
1353 value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
1354 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1360 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1362 encode_sizeq (aarch64_inst *inst)
1365 enum aarch64_field_kind kind;
1368 /* Get the index of the operand whose information we are going to use
1369 to encode the size and Q fields.
1370 This is deduced from the possible valid qualifier lists. */
1371 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1372 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1373 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
/* The qualifier's standard value packs size and Q together: Q is
   bit 0 and size is the bits above it.  */
1374 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1376 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
/* AdvSIMD load/store structure instructions keep their size bits in a
   different field (vldst_size) from ordinary data-processing forms.  */
1378 if (inst->opcode->iclass == asisdlse
1379 || inst->opcode->iclass == asisdlsep
1380 || inst->opcode->iclass == asisdlso
1381 || inst->opcode->iclass == asisdlsop)
1382 kind = FLD_vldst_size;
1385 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1388 /* Opcodes that have fields shared by multiple operands are usually flagged
1389 with flags. In this function, we detect such flags and use the
1390 information in one of the related operands to do the encoding. The 'one'
1391 operand is not any operand but one of the operands that has the enough
1392 information for such an encoding. */
1395 do_special_encoding (struct aarch64_inst *inst)
1398 aarch64_insn value = 0;
1400 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1402 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1403 if (inst->opcode->flags & F_COND)
1405 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
/* F_SF: the sf bit selects 64-bit (X/SP qualifier) vs 32-bit operation.  */
1407 if (inst->opcode->flags & F_SF)
1409 idx = select_operand_for_sf_field_coding (inst->opcode);
1410 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1411 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1413 insert_field (FLD_sf, &inst->value, value, 0);
/* Some opcodes additionally mirror the same width choice into N.  */
1414 if (inst->opcode->flags & F_N)
1415 insert_field (FLD_N, &inst->value, value, inst->opcode->mask)
/* F_LSE_SZ: same width deduction, but for the LSE atomics' size field.  */
1417 if (inst->opcode->flags & F_LSE_SZ)
1419 idx = select_operand_for_sf_field_coding (inst->opcode);
1420 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1421 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1423 insert_field (FLD_lse_sz, &inst->value, value, 0);
1425 if (inst->opcode->flags & F_SIZEQ)
1426 encode_sizeq (inst);
/* F_FPTYPE: scalar FP 'type' field — single = 0, double = 1, half = 3.  */
1427 if (inst->opcode->flags & F_FPTYPE)
1429 idx = select_operand_for_fptype_field_coding (inst->opcode);
1430 switch (inst->operands[idx].qualifier)
1432 case AARCH64_OPND_QLF_S_S: value = 0; break;
1433 case AARCH64_OPND_QLF_S_D: value = 1; break;
1434 case AARCH64_OPND_QLF_S_H: value = 3; break;
1435 default: assert (0);
1437 insert_field (FLD_type, &inst->value, value, 0);
/* F_SSIZE: scalar element size taken from a scalar-register qualifier.  */
1439 if (inst->opcode->flags & F_SSIZE)
1441 enum aarch64_opnd_qualifier qualifier;
1442 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1443 qualifier = inst->operands[idx].qualifier;
1444 assert (qualifier >= AARCH64_OPND_QLF_S_B
1445 && qualifier <= AARCH64_OPND_QLF_S_Q);
1446 value = aarch64_get_qualifier_standard_value (qualifier);
1447 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
/* F_T: arrangement encoded as Q plus a one-hot pattern inside imm5,
   e.g. for SIMD copy/duplicate instructions.  */
1449 if (inst->opcode->flags & F_T)
1451 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1452 aarch64_field field = {0, 0};
1453 enum aarch64_opnd_qualifier qualifier;
1456 qualifier = inst->operands[idx].qualifier;
1457 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1458 == AARCH64_OPND_CLASS_SIMD_REG
1459 && qualifier >= AARCH64_OPND_QLF_V_8B
1460 && qualifier <= AARCH64_OPND_QLF_V_2D);
1471 value = aarch64_get_qualifier_standard_value (qualifier);
1472 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1473 num = (int) value >> 1;
1474 assert (num >= 0 && num <= 3);
/* Put a single 1 bit at position NUM inside the low NUM+1 bits of imm5;
   the trailing zeros encode the element size.  */
1475 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1476 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
/* F_GPRSIZE_IN_Q: general-purpose register width is carried in Q.  */
1478 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1480 /* Use Rt to encode in the case of e.g.
1481 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1482 enum aarch64_opnd_qualifier qualifier;
1483 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1485 /* Otherwise use the result operand, which has to be a integer
1488 assert (idx == 0 || idx == 1);
1489 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1490 == AARCH64_OPND_CLASS_INT_REG);
1491 qualifier = inst->operands[idx].qualifier;
1492 insert_field (FLD_Q, &inst->value,
1493 aarch64_get_qualifier_standard_value (qualifier), 0);
/* F_LDS_SIZE: load-signed width lives in bit 0 of opc, inverted
   relative to the qualifier's standard value.  */
1495 if (inst->opcode->flags & F_LDS_SIZE)
1497 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1498 enum aarch64_opnd_qualifier qualifier;
1499 aarch64_field field = {0, 0};
1500 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1501 == AARCH64_OPND_CLASS_INT_REG);
1502 gen_sub_field (FLD_opc, 0, 1, &field);
1503 qualifier = inst->operands[0].qualifier;
1504 insert_field_2 (&field, &inst->value,
1505 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1507 /* Miscellaneous encoding as the last step. */
1508 if (inst->opcode->flags & F_MISC)
1509 do_misc_encoding (inst);
1511 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1514 /* Some instructions (including all SVE ones) use the instruction class
1515 to describe how a qualifiers_list index is represented in the instruction
1516 encoding. If INST is such an instruction, encode the chosen qualifier
1520 aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
/* Each instruction class stores the variant index in its own field or
   combination of fields.  */
1522 switch (inst->opcode->iclass)
1525 insert_fields (&inst->value, aarch64_get_variant (inst),
1526 0, 2, FLD_SVE_M_14, FLD_size);
1530 case sve_shift_pred:
1531 case sve_shift_unpred:
1532 /* For indices and shift amounts, the variant is encoded as
1533 part of the immediate. */
1537 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1538 and depend on the immediate. They don't have a separate
1543 /* sve_misc instructions have only a single variant. */
1547 insert_fields (&inst->value, aarch64_get_variant (inst),
1548 0, 2, FLD_SVE_M_16, FLD_size);
1552 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
1557 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
/* This class skips the byte variant, hence the +1 bias.  */
1561 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
1565 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
1573 /* Converters converting an alias opcode instruction to its real form. */
1575 /* ROR <Wd>, <Ws>, #<shift>
1577 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1579 convert_ror_to_extr (aarch64_inst *inst)
/* Shift the immediate up one slot and duplicate <Ws> so that EXTR's
   two source registers are the same.  */
1581 copy_operand_info (inst, 3, 2);
1582 copy_operand_info (inst, 2, 1);
1585 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1587 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1589 convert_xtl_to_shll (aarch64_inst *inst)
/* XTL is SHLL with a zero shift: give the new immediate operand the
   source arrangement and a shift amount of 0.  */
1591 inst->operands[2].qualifier = inst->operands[1].qualifier;
1592 inst->operands[2].imm.value = 0;
1596 LSR <Xd>, <Xn>, #<shift>
1598 UBFM <Xd>, <Xn>, #<shift>, #63. */
1600 convert_sr_to_bfm (aarch64_inst *inst)
/* A shift-right alias becomes BFM with imms fixed at the register's
   top bit: 31 for 32-bit forms, 63 for 64-bit forms.  */
1602 inst->operands[3].imm.value =
1603 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1606 /* Convert MOV to ORR. */
1608 convert_mov_to_orr (aarch64_inst *inst)
1610 /* MOV <Vd>.<T>, <Vn>.<T>
1612 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
/* ORR with both sources equal is a register move; duplicate <Vn>.  */
1613 copy_operand_info (inst, 2, 1);
1616 /* When <imms> >= <immr>, the instruction written:
1617 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1619 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1622 convert_bfx_to_bfm (aarch64_inst *inst)
1626 /* Convert the operand. */
1627 lsb = inst->operands[2].imm.value;
1628 width = inst->operands[3].imm.value;
/* immr keeps the lsb unchanged; imms becomes the index of the field's
   most significant bit.  */
1629 inst->operands[2].imm.value = lsb;
1630 inst->operands[3].imm.value = lsb + width - 1;
1633 /* When <imms> < <immr>, the instruction written:
1634 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1636 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1639 convert_bfi_to_bfm (aarch64_inst *inst)
1643 /* Convert the operand. */
1644 lsb = inst->operands[2].imm.value;
1645 width = inst->operands[3].imm.value;
/* The insert form rotates right by (regsize - lsb) mod regsize;
   mask with 0x1f/0x3f to handle lsb == 0.  */
1646 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1648 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1649 inst->operands[3].imm.value = width - 1;
1653 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1654 inst->operands[3].imm.value = width - 1;
1658 /* The instruction written:
1659 BFC <Xd>, #<lsb>, #<width>
1661 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1664 convert_bfc_to_bfm (aarch64_inst *inst)
/* Shift the operands up one slot and insert XZR (register 31) as the
   source, since BFC clears by copying from the zero register.  */
1669 copy_operand_info (inst, 3, 2);
1670 copy_operand_info (inst, 2, 1);
1671 copy_operand_info (inst, 1, 0);
1672 inst->operands[1].reg.regno = 0x1f;
1674 /* Convert the immediate operand. */
1675 lsb = inst->operands[2].imm.value;
1676 width = inst->operands[3].imm.value;
/* Same immr/imms transformation as the BFI/SBFIZ insert forms.  */
1677 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1679 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1680 inst->operands[3].imm.value = width - 1;
1684 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1685 inst->operands[3].imm.value = width - 1;
1689 /* The instruction written:
1690 LSL <Xd>, <Xn>, #<shift>
1692 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1695 convert_lsl_to_ubfm (aarch64_inst *inst)
1697 int64_t shift = inst->operands[2].imm.value;
/* immr = (regsize - shift) mod regsize, imms = regsize - 1 - shift;
   choose 32- or 64-bit constants from the immediate's qualifier.  */
1699 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1701 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1702 inst->operands[3].imm.value = 31 - shift;
1706 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1707 inst->operands[3].imm.value = 63 - shift;
1711 /* CINC <Wd>, <Wn>, <cond>
1713 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1716 convert_to_csel (aarch64_inst *inst)
/* Duplicate <Wn> into the second source slot and invert the condition,
   matching the CSINC/CSINV/CSNEG real forms of these aliases.  */
1718 copy_operand_info (inst, 3, 2);
1719 copy_operand_info (inst, 2, 1);
1720 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1723 /* CSET <Wd>, <cond>
1725 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1728 convert_cset_to_csinc (aarch64_inst *inst)
/* Shift the condition into the last slot, set both sources to WZR
   (register 31), and invert the condition.  */
1730 copy_operand_info (inst, 3, 1);
1731 copy_operand_info (inst, 2, 0);
1732 copy_operand_info (inst, 1, 0);
1733 inst->operands[1].reg.regno = 0x1f;
1734 inst->operands[2].reg.regno = 0x1f;
1735 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1740 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1743 convert_mov_to_movewide (aarch64_inst *inst)
1746 uint32_t shift_amount;
1749 switch (inst->opcode->op)
1751 case OP_MOV_IMM_WIDE:
1752 value = inst->operands[1].imm.value;
/* The MOVN form stores the bitwise complement of the immediate.  */
1754 case OP_MOV_IMM_WIDEN:
1755 value = ~inst->operands[1].imm.value;
1760 inst->operands[1].type = AARCH64_OPND_HALF;
1761 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
/* aarch64_wide_constant_p also returns the LSL amount (0/16/32/48)
   that places the 16-bit chunk.  */
1762 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1763 /* The constraint check should have guaranteed this wouldn't happen. */
1765 value >>= shift_amount;
1767 inst->operands[1].imm.value = value;
1768 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1769 inst->operands[1].shifter.amount = shift_amount;
1774 ORR <Wd>, WZR, #<imm>. */
1777 convert_mov_to_movebitmask (aarch64_inst *inst)
/* Move the immediate up a slot and insert WZR (register 31) as the
   first source; un-skip it so the inserter encodes it.  */
1779 copy_operand_info (inst, 2, 1);
1780 inst->operands[1].reg.regno = 0x1f;
1781 inst->operands[1].skip = 0;
1784 /* Some alias opcodes are assembled by being converted to their real-form. */
1787 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1789 const aarch64_opcode *alias = inst->opcode;
/* Opcodes without F_CONV need no operand transformation, only the
   opcode replacement at the end.  */
1791 if ((alias->flags & F_CONV) == 0)
1792 goto convert_to_real_return;
/* Dispatch to the per-alias operand converter (case labels for each
   alias opcode dispatch into the helpers below).  */
1798 convert_sr_to_bfm (inst);
1801 convert_lsl_to_ubfm (inst);
1806 convert_to_csel (inst);
1810 convert_cset_to_csinc (inst);
1815 convert_bfx_to_bfm (inst);
1820 convert_bfi_to_bfm (inst);
1823 convert_bfc_to_bfm (inst);
1826 convert_mov_to_orr (inst);
1828 case OP_MOV_IMM_WIDE:
1829 case OP_MOV_IMM_WIDEN:
1830 convert_mov_to_movewide (inst);
1832 case OP_MOV_IMM_LOG:
1833 convert_mov_to_movebitmask (inst);
1836 convert_ror_to_extr (inst);
1842 convert_xtl_to_shll (inst);
1848 convert_to_real_return:
/* Finally swap the alias opcode for the real one.  */
1849 aarch64_replace_opcode (inst, real);
1852 /* Encode *INST_ORI of the opcode code OPCODE.
1853 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1854 matched operand qualifier sequence in *QLF_SEQ. */
1857 aarch64_opcode_encode (const aarch64_opcode *opcode,
1858 const aarch64_inst *inst_ori, aarch64_insn *code,
1859 aarch64_opnd_qualifier_t *qlf_seq,
1860 aarch64_operand_error *mismatch_detail)
1863 const aarch64_opcode *aliased;
1864 aarch64_inst copy, *inst;
1866 DEBUG_TRACE ("enter with %s", opcode->name);
1868 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1872 assert (inst->opcode == NULL || inst->opcode == opcode);
1873 if (inst->opcode == NULL)
1874 inst->opcode = opcode;
1876 /* Constrain the operands.
1877 After passing this, the encoding is guaranteed to succeed. */
1878 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1880 DEBUG_TRACE ("FAIL since operand constraint not met");
1884 /* Get the base value.
1885 Note: this has to be before the aliasing handling below in order to
1886 get the base value from the alias opcode before we move on to the
1887 aliased opcode for encoding. */
1888 inst->value = opcode->opcode;
1890 /* No need to do anything else if the opcode does not have any operand. */
1891 if (aarch64_num_of_operands (opcode) == 0)
1894 /* Assign operand indexes and check types. Also put the matched
1895 operand qualifiers in *QLF_SEQ to return. */
1896 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1898 assert (opcode->operands[i] == inst->operands[i].type);
1899 inst->operands[i].idx = i;
1900 if (qlf_seq != NULL)
1901 *qlf_seq = inst->operands[i].qualifier;
1904 aliased = aarch64_find_real_opcode (opcode);
1905 /* If the opcode is an alias and it does not ask for direct encoding by
1906 itself, the instruction will be transformed to the form of real opcode
1907 and the encoding will be carried out using the rules for the aliased
1909 if (aliased != NULL && (opcode->flags & F_CONV))
1911 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1912 aliased->name, opcode->name);
1913 /* Convert the operands to the form of the real opcode. */
1914 convert_to_real (inst, aliased);
1918 aarch64_opnd_info *info = inst->operands;
1920 /* Call the inserter of each operand. */
1921 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1923 const aarch64_operand *opnd;
1924 enum aarch64_opnd type = opcode->operands[i];
/* A NIL operand type marks the end of the operand list.  */
1925 if (type == AARCH64_OPND_NIL)
1929 DEBUG_TRACE ("skip the incomplete operand %d", i);
1932 opnd = &aarch64_operands[type];
/* Only some operand kinds have an inserter; the rest are encoded
   implicitly by the base opcode value.  */
1933 if (operand_has_inserter (opnd))
1934 aarch64_insert_operand (opnd, info, &inst->value, inst);
1937 /* Call opcode encoders indicated by flags. */
1938 if (opcode_has_special_coder (opcode))
1939 do_special_encoding (inst);
1941 /* Possibly use the instruction class to encode the chosen qualifier
1943 aarch64_encode_variant_using_iclass (inst);
1946 DEBUG_TRACE ("exit with %s", opcode->name);
1948 *code = inst->value;