1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
48 static int no_aliases = 0;	/* If set, disassemble as the most general inst.  */
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 /* Try to match options that are simple flags.  */
60 if (CONST_STRNEQ (option, "no-aliases"))
66 if (CONST_STRNEQ (option, "aliases"))
73 if (CONST_STRNEQ (option, "debug_dump"))
78 #endif /* DEBUG_AARCH64 */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
85 parse_aarch64_dis_options (const char *options)
87 const char *option_end;
92 while (*options != '\0')
94 /* Skip empty options. */
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
106 parse_aarch64_dis_option (options, option_end - options);
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
114 /* Functions doing the instruction disassembling. */
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
124 the order of H, L, M. */
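/* For instance, with three single-bit fields passed most-significant first,
   extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M) should return the value
   H:L:M, i.e. (H << 2) | (L << 1) | M.  */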
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
135 num = va_arg (va, uint32_t);
137 aarch64_insn value = 0x0;
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
156 enum aarch64_field_kind kind;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
168 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
172 uint32_t ret = value;
175 if ((value >> i) & 0x1)
177 uint32_t val = (uint32_t)(-1) << i;
180 return (int32_t) ret;
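/* As an illustration: for a 9-bit field, sign_extend (0x1ff, 8) treats bit 8
   as the sign bit and should yield -1, while sign_extend (0x0ff, 8) stays 255.  */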
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
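/* For example, relying on that enumerator order, get_greg_qualifier_from_value
   should map 0 to AARCH64_OPND_QLF_W and 1 to AARCH64_OPND_QLF_X, while
   get_sreg_qualifier_from_value should map 0..4 to the B, H, S, D and Q
   qualifiers respectively.  */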
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
204 /* Instructions using vector type 2H should not call this function. Skip over
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
225 /* Given the instruction in *INST, which is probably halfway through the
226 decoding, return the qualifier that the caller should expect for operand
227 I, if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
238 return qualifiers[i];
240 return AARCH64_OPND_QLF_NIL;
243 /* Operand extractors. */
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
250 info->reg.regno = extract_field (self->fields[0], code, 0);
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
259 assert (info->idx == 1
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
265 /* e.g. IC <ic_op>{, <Xt>}. */
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
290 info->reglane.regno = extract_field (self->fields[0], code,
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
331 or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
337 case AARCH64_OPND_QLF_S_H:
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
341 info->reglane.regno &= 0xf;
343 case AARCH64_OPND_QLF_S_S:
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
347 case AARCH64_OPND_QLF_S_D:
349 info->reglane.index = extract_field (FLD_H, code, 0);
360 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
361 const aarch64_insn code,
362 const aarch64_inst *inst ATTRIBUTE_UNUSED)
365 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
367 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
371 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
373 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
374 aarch64_opnd_info *info, const aarch64_insn code,
375 const aarch64_inst *inst)
378 /* Number of elements in each structure to be loaded/stored. */
379 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
383 unsigned is_reserved;
385 unsigned num_elements;
401 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
403 value = extract_field (FLD_opcode, code, 0);
404 if (expected_num != data[value].num_elements || data[value].is_reserved)
406 info->reglist.num_regs = data[value].num_regs;
411 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
412 lanes instructions. */
414 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
415 aarch64_opnd_info *info, const aarch64_insn code,
416 const aarch64_inst *inst)
421 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
423 value = extract_field (FLD_S, code, 0);
425 /* Number of registers is equal to the number of elements in
426 each structure to be loaded/stored. */
427 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
428 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
430 /* Except when it is LD1R. */
431 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
432 info->reglist.num_regs = 2;
437 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
438 load/store single element instructions. */
440 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst ATTRIBUTE_UNUSED)
444 aarch64_field field = {0, 0};
445 aarch64_insn QSsize; /* fields Q:S:size. */
446 aarch64_insn opcodeh2; /* opcode<2:1> */
449 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
451 /* Decode the index, opcode<2:1> and size. */
452 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
453 opcodeh2 = extract_field_2 (&field, code, 0);
454 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
458 info->qualifier = AARCH64_OPND_QLF_S_B;
459 /* Index encoded in "Q:S:size". */
460 info->reglist.index = QSsize;
466 info->qualifier = AARCH64_OPND_QLF_S_H;
467 /* Index encoded in "Q:S:size<1>". */
468 info->reglist.index = QSsize >> 1;
471 if ((QSsize >> 1) & 0x1)
474 if ((QSsize & 0x1) == 0)
476 info->qualifier = AARCH64_OPND_QLF_S_S;
477 /* Index encoded in "Q:S". */
478 info->reglist.index = QSsize >> 2;
482 if (extract_field (FLD_S, code, 0))
485 info->qualifier = AARCH64_OPND_QLF_S_D;
486 /* Index encoded in "Q". */
487 info->reglist.index = QSsize >> 3;
494 info->reglist.has_index = 1;
495 info->reglist.num_regs = 0;
496 /* Number of registers is equal to the number of elements in
497 each structure to be loaded/stored. */
498 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
499 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
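/* Illustrative example: for a .S lane the index is encoded in "Q:S", so
   Q=1, S=1, size=00 (QSsize = 0b1100) should give lane index 3, while for
   a .D lane only "Q" contributes to the index.  */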
504 /* Decode fields immh:immb and/or Q for e.g.
505 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
506 or SSHR <V><d>, <V><n>, #<shift>. */
509 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 aarch64_opnd_info *info, const aarch64_insn code,
511 const aarch64_inst *inst)
514 aarch64_insn Q, imm, immh;
515 enum aarch64_insn_class iclass = inst->opcode->iclass;
517 immh = extract_field (FLD_immh, code, 0);
520 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
522 /* Get highest set bit in immh. */
523 while (--pos >= 0 && (immh & 0x8) == 0)
526 assert ((iclass == asimdshf || iclass == asisdshf)
527 && (info->type == AARCH64_OPND_IMM_VLSR
528 || info->type == AARCH64_OPND_IMM_VLSL));
530 if (iclass == asimdshf)
532 Q = extract_field (FLD_Q, code, 0);
534 0000 x SEE AdvSIMD modified immediate
544 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
547 info->qualifier = get_sreg_qualifier_from_value (pos);
549 if (info->type == AARCH64_OPND_IMM_VLSR)
551 0000 SEE AdvSIMD modified immediate
552 0001 (16-UInt(immh:immb))
553 001x (32-UInt(immh:immb))
554 01xx (64-UInt(immh:immb))
555 1xxx (128-UInt(immh:immb)) */
556 info->imm.value = (16 << pos) - imm;
560 0000 SEE AdvSIMD modified immediate
561 0001 (UInt(immh:immb)-8)
562 001x (UInt(immh:immb)-16)
563 01xx (UInt(immh:immb)-32)
564 1xxx (UInt(immh:immb)-64) */
565 info->imm.value = imm - (8 << pos);
570 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
572 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
573 aarch64_opnd_info *info, const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED)
578 val = extract_field (FLD_size, code, 0);
581 case 0: imm = 8; break;
582 case 1: imm = 16; break;
583 case 2: imm = 32; break;
586 info->imm.value = imm;
590 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
591 The value in the field(s) will be extracted as an unsigned immediate value. */
593 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
594 const aarch64_insn code,
595 const aarch64_inst *inst ATTRIBUTE_UNUSED)
599 imm = extract_all_fields (self, code);
601 if (operand_need_sign_extension (self))
602 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
604 if (operand_need_shift_by_two (self))
607 if (info->type == AARCH64_OPND_ADDR_ADRP)
610 info->imm.value = imm;
614 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
616 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
617 const aarch64_insn code,
618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
620 aarch64_ext_imm (self, info, code, inst);
621 info->shifter.kind = AARCH64_MOD_LSL;
622 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
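/* For example, hw = 0b10 should decode as LSL #32, as used by
   MOVZ <Xd>, #<imm16>, LSL #32.  */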
626 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
627 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
629 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
630 aarch64_opnd_info *info,
631 const aarch64_insn code,
632 const aarch64_inst *inst ATTRIBUTE_UNUSED)
635 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
636 aarch64_field field = {0, 0};
638 assert (info->idx == 1);
640 if (info->type == AARCH64_OPND_SIMD_FPIMM)
643 /* a:b:c:d:e:f:g:h */
644 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
645 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
647 /* Either MOVI <Dd>, #<imm>
648 or MOVI <Vd>.2D, #<imm>.
649 <imm> is a 64-bit immediate
650 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
651 encoded in "a:b:c:d:e:f:g:h". */
653 unsigned abcdefgh = imm;
654 for (imm = 0ull, i = 0; i < 8; i++)
655 if (((abcdefgh >> i) & 0x1) != 0)
656 imm |= 0xffull << (8 * i);
658 info->imm.value = imm;
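/* As an illustration, abcdefgh = 0b10100101 should expand to the 64-bit
   immediate 0xff00ff0000ff00ff, each set bit selecting a 0xff byte.  */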
661 info->qualifier = get_expected_qualifier (inst, info->idx);
662 switch (info->qualifier)
664 case AARCH64_OPND_QLF_NIL:
666 info->shifter.kind = AARCH64_MOD_NONE;
668 case AARCH64_OPND_QLF_LSL:
670 info->shifter.kind = AARCH64_MOD_LSL;
671 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
673 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
674 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
675 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
676 default: assert (0); return 0;
678 /* 00: 0; 01: 8; 10:16; 11:24. */
679 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
681 case AARCH64_OPND_QLF_MSL:
683 info->shifter.kind = AARCH64_MOD_MSL;
684 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
685 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
695 /* Decode an 8-bit floating-point immediate. */
697 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
701 info->imm.value = extract_all_fields (self, code);
706 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
708 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 aarch64_opnd_info *info, const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
712 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
716 /* Decode arithmetic immediate for e.g.
717 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
719 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
720 aarch64_opnd_info *info, const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
725 info->shifter.kind = AARCH64_MOD_LSL;
727 value = extract_field (FLD_shift, code, 0);
730 info->shifter.amount = value ? 12 : 0;
731 /* imm12 (unsigned) */
732 info->imm.value = extract_field (FLD_imm12, code, 0);
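/* For instance, shift = 1 with imm12 = 1 should decode as "#0x1, LSL #12",
   i.e. the arithmetic immediate 4096.  */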
737 /* Return true if VALUE is a valid logical immediate encoding, storing the
738 decoded value in *RESULT if so. ESIZE is the number of bytes in the
739 decoded immediate. */
741 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
747 /* value is N:immr:imms. */
749 R = (value >> 6) & 0x3f;
750 N = (value >> 12) & 0x1;
752 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
753 (in other words, right rotated by R), then replicated. */
757 mask = 0xffffffffffffffffull;
763 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
764 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
765 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
766 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
767 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
770 mask = (1ull << simd_size) - 1;
771 /* Top bits are IGNORED. */
775 if (simd_size > esize * 8)
778 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
779 if (S == simd_size - 1)
781 /* S+1 consecutive bits set to 1. */
782 /* NOTE: S can't be 63 due to detection above. */
783 imm = (1ull << (S + 1)) - 1;
784 /* Rotate to the left by simd_size - R. */
786 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
787 /* Replicate the value according to SIMD size. */
790 case 2: imm = (imm << 2) | imm;
791 case 4: imm = (imm << 4) | imm;
792 case 8: imm = (imm << 8) | imm;
793 case 16: imm = (imm << 16) | imm;
794 case 32: imm = (imm << 32) | imm;
796 default: assert (0); return 0;
799 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
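/* Worked example (for a 32-bit operation, esize = 4): N:immr:imms =
   0:000000:000011 gives simd_size = 32 and S = 3, so imm = 0b1111 and the
   decoded value should be 0x0000000f; with immr = 000001 the pattern is
   rotated right by one, giving 0x80000007.  */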
804 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
806 aarch64_ext_limm (const aarch64_operand *self,
807 aarch64_opnd_info *info, const aarch64_insn code,
808 const aarch64_inst *inst)
813 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
815 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
816 return decode_limm (esize, value, &info->imm.value);
819 /* Decode a logical immediate for the BIC alias of AND (etc.). */
821 aarch64_ext_inv_limm (const aarch64_operand *self,
822 aarch64_opnd_info *info, const aarch64_insn code,
823 const aarch64_inst *inst)
825 if (!aarch64_ext_limm (self, info, code, inst))
827 info->imm.value = ~info->imm.value;
831 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
832 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
834 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
835 aarch64_opnd_info *info,
836 const aarch64_insn code, const aarch64_inst *inst)
841 info->reg.regno = extract_field (FLD_Rt, code, 0);
844 value = extract_field (FLD_ldst_size, code, 0);
845 if (inst->opcode->iclass == ldstpair_indexed
846 || inst->opcode->iclass == ldstnapair_offs
847 || inst->opcode->iclass == ldstpair_off
848 || inst->opcode->iclass == loadlit)
850 enum aarch64_opnd_qualifier qualifier;
853 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
854 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
855 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
858 info->qualifier = qualifier;
863 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
866 info->qualifier = get_sreg_qualifier_from_value (value);
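/* E.g. opc1:size = 0b100 (an FP/SIMD Q-register access) should map to
   AARCH64_OPND_QLF_S_Q, while 0b011 maps to AARCH64_OPND_QLF_S_D.  */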
872 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
874 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
875 aarch64_opnd_info *info,
877 const aarch64_inst *inst ATTRIBUTE_UNUSED)
880 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
884 /* Decode the address operand for e.g.
885 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
887 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
888 aarch64_opnd_info *info,
889 aarch64_insn code, const aarch64_inst *inst)
891 aarch64_insn S, value;
894 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
896 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
898 value = extract_field (FLD_option, code, 0);
900 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
901 /* Fix-up the shifter kind; although the table-driven approach is
902 efficient, it is slightly inflexible, thus needing this fix-up. */
903 if (info->shifter.kind == AARCH64_MOD_UXTX)
904 info->shifter.kind = AARCH64_MOD_LSL;
906 S = extract_field (FLD_S, code, 0);
909 info->shifter.amount = 0;
910 info->shifter.amount_present = 0;
915 /* Need information in other operand(s) to help achieve the decoding
917 info->qualifier = get_expected_qualifier (inst, info->idx);
918 /* Get the size of the data element that is accessed, which may be
919 different from the source register size, e.g. in strb/ldrb. */
920 size = aarch64_get_qualifier_esize (info->qualifier);
921 info->shifter.amount = get_logsz (size);
922 info->shifter.amount_present = 1;
928 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
930 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
931 aarch64_insn code, const aarch64_inst *inst)
934 info->qualifier = get_expected_qualifier (inst, info->idx);
937 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
938 /* simm (imm9 or imm7) */
939 imm = extract_field (self->fields[0], code, 0);
940 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
941 if (self->fields[0] == FLD_imm7)
942 /* scaled immediate in ld/st pair instructions. */
943 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
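/* For example, in LDP <Xt1>, <Xt2>, [<Xn|SP>, #-16] the imm7 field holds -2,
   which is scaled by the 8-byte element size to give the -16 byte offset.  */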
945 if (inst->opcode->iclass == ldst_unscaled
946 || inst->opcode->iclass == ldstnapair_offs
947 || inst->opcode->iclass == ldstpair_off
948 || inst->opcode->iclass == ldst_unpriv)
949 info->addr.writeback = 0;
952 /* pre/post- index */
953 info->addr.writeback = 1;
954 if (extract_field (self->fields[1], code, 0) == 1)
955 info->addr.preind = 1;
957 info->addr.postind = 1;
963 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
965 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
967 const aarch64_inst *inst ATTRIBUTE_UNUSED)
970 info->qualifier = get_expected_qualifier (inst, info->idx);
971 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
973 info->addr.base_regno = extract_field (self->fields[0], code, 0);
975 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
979 /* Decode the address operand for e.g.
980 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
982 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
983 aarch64_opnd_info *info,
984 aarch64_insn code, const aarch64_inst *inst)
986 /* The opcode dependent area stores the number of elements in
987 each structure to be loaded/stored. */
988 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
991 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
993 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
994 if (info->addr.offset.regno == 31)
996 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
997 /* Special handling of loading a single structure to all lanes. */
998 info->addr.offset.imm = (is_ld1r ? 1
999 : inst->operands[0].reglist.num_regs)
1000 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1002 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1003 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1004 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1007 info->addr.offset.is_reg = 1;
1008 info->addr.writeback = 1;
1013 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1015 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1016 aarch64_opnd_info *info,
1017 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1021 value = extract_field (FLD_cond, code, 0);
1022 info->cond = get_cond_from_value (value);
1026 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1028 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1029 aarch64_opnd_info *info,
1031 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1033 /* op0:op1:CRn:CRm:op2 */
1034 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1039 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1041 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1042 aarch64_opnd_info *info, aarch64_insn code,
1043 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1047 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1048 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1049 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1051 /* Reserved value in <pstatefield>. */
1055 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1057 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1058 aarch64_opnd_info *info,
1060 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1064 const aarch64_sys_ins_reg *sysins_ops;
1065 /* op0:op1:CRn:CRm:op2 */
1066 value = extract_fields (code, 0, 5,
1067 FLD_op0, FLD_op1, FLD_CRn,
1072 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1073 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1074 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1075 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1076 default: assert (0); return 0;
1079 for (i = 0; sysins_ops[i].name != NULL; ++i)
1080 if (sysins_ops[i].value == value)
1082 info->sysins_op = sysins_ops + i;
1083 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1084 info->sysins_op->name,
1085 (unsigned)info->sysins_op->value,
1086 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1093 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1096 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1097 aarch64_opnd_info *info,
1099 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1102 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1106 /* Decode the prefetch operation option operand for e.g.
1107 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1110 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1111 aarch64_opnd_info *info,
1112 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1115 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1119 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1120 to the matching name/value pair in aarch64_hint_options. */
1123 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1124 aarch64_opnd_info *info,
1126 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1129 unsigned hint_number;
1132 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1134 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1136 if (hint_number == aarch64_hint_options[i].value)
1138 info->hint_option = &(aarch64_hint_options[i]);
1146 /* Decode the extended register operand for e.g.
1147 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1149 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1150 aarch64_opnd_info *info,
1152 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1157 info->reg.regno = extract_field (FLD_Rm, code, 0);
1159 value = extract_field (FLD_option, code, 0);
1160 info->shifter.kind =
1161 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1163 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1165 /* This makes the constraint checking happy. */
1166 info->shifter.operator_present = 1;
1168 /* Assume inst->operands[0].qualifier has been resolved. */
1169 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1170 info->qualifier = AARCH64_OPND_QLF_W;
1171 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1172 && (info->shifter.kind == AARCH64_MOD_UXTX
1173 || info->shifter.kind == AARCH64_MOD_SXTX))
1174 info->qualifier = AARCH64_OPND_QLF_X;
1179 /* Decode the shifted register operand for e.g.
1180 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1182 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1183 aarch64_opnd_info *info,
1185 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1190 info->reg.regno = extract_field (FLD_Rm, code, 0);
1192 value = extract_field (FLD_shift, code, 0);
1193 info->shifter.kind =
1194 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1195 if (info->shifter.kind == AARCH64_MOD_ROR
1196 && inst->opcode->iclass != log_shift)
1197 /* ROR is not available for the shifted register operand in arithmetic
1201 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1203 /* This makes the constraint checking happy. */
1204 info->shifter.operator_present = 1;
1209 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1210 where <offset> is given by the OFFSET parameter and where <factor> is
1211 1 plus SELF's operand-dependent value. fields[0] specifies the field
1212 that holds <base>. */
1214 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1215 aarch64_opnd_info *info, aarch64_insn code,
1218 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1219 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1220 info->addr.offset.is_reg = FALSE;
1221 info->addr.writeback = FALSE;
1222 info->addr.preind = TRUE;
1224 info->shifter.kind = AARCH64_MOD_MUL_VL;
1225 info->shifter.amount = 1;
1226 info->shifter.operator_present = (info->addr.offset.imm != 0);
1227 info->shifter.amount_present = FALSE;
1231 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1232 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1233 SELF's operand-dependent value. fields[0] specifies the field that
1234 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1236 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1237 aarch64_opnd_info *info, aarch64_insn code,
1238 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1242 offset = extract_field (FLD_SVE_imm4, code, 0);
1243 offset = ((offset + 8) & 15) - 8;
1244 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1247 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1248 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1249 SELF's operand-dependent value. fields[0] specifies the field that
1250 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1252 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1253 aarch64_opnd_info *info, aarch64_insn code,
1254 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1258 offset = extract_field (FLD_SVE_imm6, code, 0);
1259 offset = (((offset + 32) & 63) - 32);
1260 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1263 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1264 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1265 SELF's operand-dependent value. fields[0] specifies the field that
1266 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1267 and imm3 fields, with imm3 being the less-significant part. */
1269 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1270 aarch64_opnd_info *info,
1272 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1276 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1277 offset = (((offset + 256) & 511) - 256);
1278 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1281 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1282 is given by the OFFSET parameter and where <shift> is SELF's operand-
1283 dependent value. fields[0] specifies the base register field <base>. */
1285 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1286 aarch64_opnd_info *info, aarch64_insn code,
1289 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1290 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1291 info->addr.offset.is_reg = FALSE;
1292 info->addr.writeback = FALSE;
1293 info->addr.preind = TRUE;
1294 info->shifter.operator_present = FALSE;
1295 info->shifter.amount_present = FALSE;
1299 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1300 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1301 value. fields[0] specifies the base register field. */
1303 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1304 aarch64_opnd_info *info, aarch64_insn code,
1305 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1307 int offset = extract_field (FLD_SVE_imm6, code, 0);
1308 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1311 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1312 is SELF's operand-dependent value. fields[0] specifies the base
1313 register field and fields[1] specifies the offset register field. */
1315 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1316 aarch64_opnd_info *info, aarch64_insn code,
1317 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1321 index = extract_field (self->fields[1], code, 0);
1322 if (index == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1325 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1326 info->addr.offset.regno = index;
1327 info->addr.offset.is_reg = TRUE;
1328 info->addr.writeback = FALSE;
1329 info->addr.preind = TRUE;
1330 info->shifter.kind = AARCH64_MOD_LSL;
1331 info->shifter.amount = get_operand_specific_data (self);
1332 info->shifter.operator_present = (info->shifter.amount != 0);
1333 info->shifter.amount_present = (info->shifter.amount != 0);
1337 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1338 <shift> is SELF's operand-dependent value. fields[0] specifies the
1339 base register field, fields[1] specifies the offset register field and
1340 fields[2] is a single-bit field that selects SXTW over UXTW. */
1342 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1343 aarch64_opnd_info *info, aarch64_insn code,
1344 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1346 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1347 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1348 info->addr.offset.is_reg = TRUE;
1349 info->addr.writeback = FALSE;
1350 info->addr.preind = TRUE;
1351 if (extract_field (self->fields[2], code, 0))
1352 info->shifter.kind = AARCH64_MOD_SXTW;
1354 info->shifter.kind = AARCH64_MOD_UXTW;
1355 info->shifter.amount = get_operand_specific_data (self);
1356 info->shifter.operator_present = TRUE;
1357 info->shifter.amount_present = (info->shifter.amount != 0);
1361 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1362 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1363 fields[0] specifies the base register field. */
1365 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1366 aarch64_opnd_info *info, aarch64_insn code,
1367 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1369 int offset = extract_field (FLD_imm5, code, 0);
1370 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1373 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1374 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1375 number. fields[0] specifies the base register field and fields[1]
1376 specifies the offset register field. */
1378 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1379 aarch64_insn code, enum aarch64_modifier_kind kind)
1381 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1382 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1383 info->addr.offset.is_reg = TRUE;
1384 info->addr.writeback = FALSE;
1385 info->addr.preind = TRUE;
1386 info->shifter.kind = kind;
1387 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1388 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1389 || info->shifter.amount != 0);
1390 info->shifter.amount_present = (info->shifter.amount != 0);
1394 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1395 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1396 field and fields[1] specifies the offset register field. */
1398 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1399 aarch64_opnd_info *info, aarch64_insn code,
1400 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1402 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1405 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1406 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1407 field and fields[1] specifies the offset register field. */
1409 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1410 aarch64_opnd_info *info, aarch64_insn code,
1411 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1413 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1416 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1417 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1418 field and fields[1] specifies the offset register field. */
1420 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1421 aarch64_opnd_info *info, aarch64_insn code,
1422 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1424 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1427 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1428 has the raw field value and that the low 8 bits decode to VALUE. */
1430 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1432 info->shifter.kind = AARCH64_MOD_LSL;
1433 info->shifter.amount = 0;
1434 if (info->imm.value & 0x100)
1437 /* Decode 0x100 as #0, LSL #8. */
1438 info->shifter.amount = 8;
1442 info->shifter.operator_present = (info->shifter.amount != 0);
1443 info->shifter.amount_present = (info->shifter.amount != 0);
1444 info->imm.value = value;
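/* For instance, a raw field value of 0x101 should decode as "#1, LSL #8",
   i.e. the immediate 256.  */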
1448 /* Decode an SVE ADD/SUB immediate. */
1450 aarch64_ext_sve_aimm (const aarch64_operand *self,
1451 aarch64_opnd_info *info, const aarch64_insn code,
1452 const aarch64_inst *inst)
1454 return (aarch64_ext_imm (self, info, code, inst)
1455 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1458 /* Decode an SVE CPY/DUP immediate. */
1460 aarch64_ext_sve_asimm (const aarch64_operand *self,
1461 aarch64_opnd_info *info, const aarch64_insn code,
1462 const aarch64_inst *inst)
1464 return (aarch64_ext_imm (self, info, code, inst)
1465 && decode_sve_aimm (info, (int8_t) info->imm.value));
1468 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1469 The fields array specifies which field to use. */
1471 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1472 aarch64_opnd_info *info, aarch64_insn code,
1473 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1475 if (extract_field (self->fields[0], code, 0))
1476 info->imm.value = 0x3f800000;
1478 info->imm.value = 0x3f000000;
1479 info->imm.is_fp = TRUE;
1483 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1484 The fields array specifies which field to use. */
1486 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1487 aarch64_opnd_info *info, aarch64_insn code,
1488 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1490 if (extract_field (self->fields[0], code, 0))
1491 info->imm.value = 0x40000000;
1493 info->imm.value = 0x3f000000;
1494 info->imm.is_fp = TRUE;
1498 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1499 The fields array specifies which field to use. */
1501 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1502 aarch64_opnd_info *info, aarch64_insn code,
1503 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1505 if (extract_field (self->fields[0], code, 0))
1506 info->imm.value = 0x3f800000;
1508 info->imm.value = 0x0;
1509 info->imm.is_fp = TRUE;
1513 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1514 array specifies which field to use for Zn. MM is encoded in the
1515 concatenation of imm5 and SVE_tszh, with imm5 being the less
1516 significant part. */
1518 aarch64_ext_sve_index (const aarch64_operand *self,
1519 aarch64_opnd_info *info, aarch64_insn code,
1520 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1524 info->reglane.regno = extract_field (self->fields[0], code, 0);
1525 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1526 if ((val & 15) == 0)
1528 while ((val & 1) == 0)
1530 info->reglane.index = val / 2;
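/* Illustrative example: val = 0b0101100 has its lowest set bit (the element
   size marker) at bit 2; shifting out the trailing zeros leaves 0b01011, and
   dropping the marker bit should give lane index 0b0101 = 5.  */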
1534 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1536 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1537 aarch64_opnd_info *info, const aarch64_insn code,
1538 const aarch64_inst *inst)
1540 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1541 return (aarch64_ext_limm (self, info, code, inst)
1542 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1545 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1546 to use for Zn. The opcode-dependent value specifies the number
1547 of registers in the list. */
1549 aarch64_ext_sve_reglist (const aarch64_operand *self,
1550 aarch64_opnd_info *info, aarch64_insn code,
1551 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1553 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1554 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1558 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1559 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1562 aarch64_ext_sve_scale (const aarch64_operand *self,
1563 aarch64_opnd_info *info, aarch64_insn code,
1564 const aarch64_inst *inst)
1568 if (!aarch64_ext_imm (self, info, code, inst))
1570 val = extract_field (FLD_SVE_imm4, code, 0);
1571 info->shifter.kind = AARCH64_MOD_MUL;
1572 info->shifter.amount = val + 1;
1573 info->shifter.operator_present = (val != 0);
1574 info->shifter.amount_present = (val != 0);
1578 /* Return the top set bit in VALUE, which is expected to be relatively
1581 get_top_bit (uint64_t value)
1583 while ((value & -value) != value)
1584 value -= value & -value;
1588 /* Decode an SVE shift-left immediate. */
1590 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1591 aarch64_opnd_info *info, const aarch64_insn code,
1592 const aarch64_inst *inst)
1594 if (!aarch64_ext_imm (self, info, code, inst)
1595 || info->imm.value == 0)
1598 info->imm.value -= get_top_bit (info->imm.value);
1602 /* Decode an SVE shift-right immediate. */
1604 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1605 aarch64_opnd_info *info, const aarch64_insn code,
1606 const aarch64_inst *inst)
1608 if (!aarch64_ext_imm (self, info, code, inst)
1609 || info->imm.value == 0)
1612 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1616 /* Bitfields that are commonly used to encode certain operands' information
1617 may be partially used as part of the base opcode in some instructions.
1618 For example, the bit 1 of the field 'size' in
1619 FCVTXN <Vb><d>, <Va><n>
1620 is actually part of the base opcode, while only size<0> is available
1621 for encoding the register type. Another example is the AdvSIMD
1622 instruction ORR (register), in which the field 'size' is also used for
1623 the base opcode, leaving only the field 'Q' available to encode the
1624 vector register arrangement specifier '8B' or '16B'.
1626 This function tries to deduce the qualifier from the value of partially
1627 constrained field(s). Given the VALUE of such a field or fields, the
1628 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1629 operand encoding), the function returns the matching qualifier or
1630 AARCH64_OPND_QLF_NIL if nothing matches.
1632 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1633 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1634 may end with AARCH64_OPND_QLF_NIL. */
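/* Illustrative example: for AdvSIMD ORR (register) only the Q bit is free, so
   with CANDIDATES {8B, 16B} and a MASK covering just Q, a VALUE with Q == 1
   should match the 16B qualifier.  */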
1636 static enum aarch64_opnd_qualifier
1637 get_qualifier_from_partial_encoding (aarch64_insn value,
1638 const enum aarch64_opnd_qualifier* \
1643 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1644 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1646 aarch64_insn standard_value;
1647 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1649 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1650 if ((standard_value & mask) == (value & mask))
1651 return candidates[i];
1653 return AARCH64_OPND_QLF_NIL;
1656 /* Given a list of qualifier sequences, return all possible valid qualifiers
1657 for operand IDX in QUALIFIERS.
1658 Assume QUALIFIERS is an array whose length is large enough. */
1661 get_operand_possible_qualifiers (int idx,
1662 const aarch64_opnd_qualifier_seq_t *list,
1663 enum aarch64_opnd_qualifier *qualifiers)
1666 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1667 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1671 /* Decode the size and Q fields for e.g. SHADD.
1672 We tag one operand with the qualifier according to the code;
1673 whether the qualifier is valid for this opcode or not is the
1674 duty of the semantic checking. */
1677 decode_sizeq (aarch64_inst *inst)
1680 enum aarch64_opnd_qualifier qualifier;
1682 aarch64_insn value, mask;
1683 enum aarch64_field_kind fld_sz;
1684 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1686 if (inst->opcode->iclass == asisdlse
1687 || inst->opcode->iclass == asisdlsep
1688 || inst->opcode->iclass == asisdlso
1689 || inst->opcode->iclass == asisdlsop)
1690 fld_sz = FLD_vldst_size;
1695 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1696 /* Work out which bits of the fields Q and size are actually
1697 available for operand encoding. Opcodes like FMAXNM and FMLA have
1698 size[1] unavailable. */
1699 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1701 /* The index of the operand that we are going to tag with a qualifier, and the
1702 qualifier itself, are deduced from the value of the size and Q fields and the
1703 possible valid qualifier lists. */
1704 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1705 DEBUG_TRACE ("key idx: %d", idx);
1707 /* For most of the related instructions, size:Q is fully available for operand
1711 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1715 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1717 #ifdef DEBUG_AARCH64
1721 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1722 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1723 DEBUG_TRACE ("qualifier %d: %s", i,
1724 aarch64_get_qualifier_name(candidates[i]));
1725 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1727 #endif /* DEBUG_AARCH64 */
1729 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1731 if (qualifier == AARCH64_OPND_QLF_NIL)
1734 inst->operands[idx].qualifier = qualifier;
1738 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1739 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1742 decode_asimd_fcvt (aarch64_inst *inst)
1744 aarch64_field field = {0, 0};
1746 enum aarch64_opnd_qualifier qualifier;
1748 gen_sub_field (FLD_size, 0, 1, &field);
1749 value = extract_field_2 (&field, inst->value, 0);
1750 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1751 : AARCH64_OPND_QLF_V_2D;
1752 switch (inst->opcode->op)
1756 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1757 inst->operands[1].qualifier = qualifier;
1761 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1762 inst->operands[0].qualifier = qualifier;
1772 /* Decode size[0], i.e. bit 22, for
1773 e.g. FCVTXN <Vb><d>, <Va><n>. */
1776 decode_asisd_fcvtxn (aarch64_inst *inst)
1778 aarch64_field field = {0, 0};
1779 gen_sub_field (FLD_size, 0, 1, &field);
1780 if (!extract_field_2 (&field, inst->value, 0))
1782 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1786 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1788 decode_fcvt (aarch64_inst *inst)
1790 enum aarch64_opnd_qualifier qualifier;
1792 const aarch64_field field = {15, 2};
1795 value = extract_field_2 (&field, inst->value, 0);
1798 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1799 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1800 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1803 inst->operands[0].qualifier = qualifier;
1808 /* Do miscellaneous decodings that are not common enough to be driven by
1812 do_misc_decoding (aarch64_inst *inst)
1815 switch (inst->opcode->op)
1818 return decode_fcvt (inst);
1824 return decode_asimd_fcvt (inst);
1827 return decode_asisd_fcvtxn (inst);
1831 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1832 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1833 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1836 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1837 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1840 /* Index must be zero. */
1841 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1842 return value == 1 || value == 2 || value == 4 || value == 8;
1845 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1846 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1849 /* Index must be nonzero. */
1850 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1851 return value != 1 && value != 2 && value != 4 && value != 8;
1854 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1855 == extract_field (FLD_SVE_Pm, inst->value, 0));
1857 case OP_MOVZS_P_P_P:
1859 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1860 == extract_field (FLD_SVE_Pm, inst->value, 0));
1862 case OP_NOTS_P_P_P_Z:
1863 case OP_NOT_P_P_P_Z:
1864 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1865 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1872 /* Opcodes that have fields shared by multiple operands are usually flagged
1873 with flags. In this function, we detect such flags, decode the related
1874 field(s) and store the information in one of the related operands. The
1875 'one' operand is not any operand but one of the operands that can
1876 accommodate all the information that has been decoded. */
1879 do_special_decoding (aarch64_inst *inst)
1883 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1884 if (inst->opcode->flags & F_COND)
1886 value = extract_field (FLD_cond2, inst->value, 0);
1887 inst->cond = get_cond_from_value (value);
1890 if (inst->opcode->flags & F_SF)
1892 idx = select_operand_for_sf_field_coding (inst->opcode);
1893 value = extract_field (FLD_sf, inst->value, 0);
1894 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1895 if ((inst->opcode->flags & F_N)
1896 && extract_field (FLD_N, inst->value, 0) != value)
1900 if (inst->opcode->flags & F_LSE_SZ)
1902 idx = select_operand_for_sf_field_coding (inst->opcode);
1903 value = extract_field (FLD_lse_sz, inst->value, 0);
1904 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1906 /* size:Q fields. */
1907 if (inst->opcode->flags & F_SIZEQ)
1908 return decode_sizeq (inst);
1910 if (inst->opcode->flags & F_FPTYPE)
1912 idx = select_operand_for_fptype_field_coding (inst->opcode);
1913 value = extract_field (FLD_type, inst->value, 0);
1916 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1917 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1918 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1923 if (inst->opcode->flags & F_SSIZE)
1925 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1926 of the base opcode. */
1928 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1929 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1930 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1931 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1932 /* For most of the related instructions, the 'size' field is fully available for
1933 operand encoding. */
1935 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1938 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1940 inst->operands[idx].qualifier
1941 = get_qualifier_from_partial_encoding (value, candidates, mask);
1945 if (inst->opcode->flags & F_T)
1947 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1950 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1951 == AARCH64_OPND_CLASS_SIMD_REG);
1962 val = extract_field (FLD_imm5, inst->value, 0);
1963 while ((val & 0x1) == 0 && ++num <= 3)
1967 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1968 inst->operands[0].qualifier =
1969 get_vreg_qualifier_from_value ((num << 1) | Q);
1972 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1974 /* Use Rt to encode in the case of e.g.
1975 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1976 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1979 /* Otherwise use the result operand, which has to be an integer
1981 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1982 == AARCH64_OPND_CLASS_INT_REG);
1985 assert (idx == 0 || idx == 1);
1986 value = extract_field (FLD_Q, inst->value, 0);
1987 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1990 if (inst->opcode->flags & F_LDS_SIZE)
1992 aarch64_field field = {0, 0};
1993 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1994 == AARCH64_OPND_CLASS_INT_REG);
1995 gen_sub_field (FLD_opc, 0, 1, &field);
1996 value = extract_field_2 (&field, inst->value, 0);
1997 inst->operands[0].qualifier
1998 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2001 /* Miscellaneous decoding; done as the last step. */
2002 if (inst->opcode->flags & F_MISC)
2003 return do_misc_decoding (inst);
2008 /* Converters that convert a real opcode instruction to its alias form. */
2010 /* ROR <Wd>, <Ws>, #<shift>
2012 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2014 convert_extr_to_ror (aarch64_inst *inst)
2016 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2018 copy_operand_info (inst, 2, 3);
2019 inst->operands[3].type = AARCH64_OPND_NIL;
2025 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2027 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2029 convert_shll_to_xtl (aarch64_inst *inst)
2031 if (inst->operands[2].imm.value == 0)
2033 inst->operands[2].type = AARCH64_OPND_NIL;
2040 UBFM <Xd>, <Xn>, #<shift>, #63.
2042 LSR <Xd>, <Xn>, #<shift>. */
2044 convert_bfm_to_sr (aarch64_inst *inst)
2048 imms = inst->operands[3].imm.value;
2049 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2052 inst->operands[3].type = AARCH64_OPND_NIL;
2059 /* Convert MOV to ORR. */
2061 convert_orr_to_mov (aarch64_inst *inst)
2063 /* MOV <Vd>.<T>, <Vn>.<T>
2065 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2066 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2068 inst->operands[2].type = AARCH64_OPND_NIL;
2074 /* When <imms> >= <immr>, the instruction written:
2075 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2077 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2080 convert_bfm_to_bfx (aarch64_inst *inst)
2084 immr = inst->operands[2].imm.value;
2085 imms = inst->operands[3].imm.value;
2089 inst->operands[2].imm.value = lsb;
2090 inst->operands[3].imm.value = imms + 1 - lsb;
2091 /* The two opcodes have different qualifiers for
2092 the immediate operands; reset to help the checking. */
2093 reset_operand_qualifier (inst, 2);
2094 reset_operand_qualifier (inst, 3);
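/* For example, SBFM <Wd>, <Wn>, #4, #11 should be shown as
   SBFX <Wd>, <Wn>, #4, #8 (lsb 4, width 11 + 1 - 4 = 8).  */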
2101 /* When <imms> < <immr>, the instruction written:
2102 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2104 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2107 convert_bfm_to_bfi (aarch64_inst *inst)
2109 int64_t immr, imms, val;
2111 immr = inst->operands[2].imm.value;
2112 imms = inst->operands[3].imm.value;
2113 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2116 inst->operands[2].imm.value = (val - immr) & (val - 1);
2117 inst->operands[3].imm.value = imms + 1;
2118 /* The two opcodes have different qualifiers for
2119 the immediate operands; reset to help the checking. */
2120 reset_operand_qualifier (inst, 2);
2121 reset_operand_qualifier (inst, 3);
2128 /* The instruction written:
2129 BFC <Xd>, #<lsb>, #<width>
2131 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2134 convert_bfm_to_bfc (aarch64_inst *inst)
2136 int64_t immr, imms, val;
2138 /* Should have been assured by the base opcode value. */
2139 assert (inst->operands[1].reg.regno == 0x1f);
2141 immr = inst->operands[2].imm.value;
2142 imms = inst->operands[3].imm.value;
2143 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2146 /* Drop XZR from the second operand. */
2147 copy_operand_info (inst, 1, 2);
2148 copy_operand_info (inst, 2, 3);
2149 inst->operands[3].type = AARCH64_OPND_NIL;
2151 /* Recalculate the immediates. */
2152 inst->operands[1].imm.value = (val - immr) & (val - 1);
2153 inst->operands[2].imm.value = imms + 1;
2155 /* The two opcodes have different qualifiers for the operands; reset to
2156 help the checking. */
2157 reset_operand_qualifier (inst, 1);
2158 reset_operand_qualifier (inst, 2);
2159 reset_operand_qualifier (inst, 3);
2167 /* The instruction written:
2168 LSL <Xd>, <Xn>, #<shift>
2170 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2173 convert_ubfm_to_lsl (aarch64_inst *inst)
2175 int64_t immr = inst->operands[2].imm.value;
2176 int64_t imms = inst->operands[3].imm.value;
2178 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2180 if ((immr == 0 && imms == val) || immr == imms + 1)
2182 inst->operands[3].type = AARCH64_OPND_NIL;
2183 inst->operands[2].imm.value = val - imms;
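
/* A worked instance of the LSL <-> UBFM mapping above, kept separate from
   the real converter; the helper name is illustrative only.  LSL X0, X1, #3
   is UBFM X0, X1, #61, #60: immr == (64 - 3) & 0x3f == 61 and
   imms == 63 - 3 == 60, so the shift amount is recovered as 63 - imms.  */

static int64_t ATTRIBUTE_UNUSED
lsl_shift_from_ubfm_sketch (int64_t imms, int64_t regsize_minus_1)
{
  /* REGSIZE_MINUS_1 is 31 for the W form and 63 for the X form.  Only valid
     when the UBFM really is an LSL, i.e. immr == imms + 1, or immr == 0 and
     imms == regsize_minus_1 for a shift of zero.  */
  return regsize_minus_1 - imms;
}
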
2190 /* CINC <Wd>, <Wn>, <cond>
2192 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2193 where <cond> is not AL or NV. */
2196 convert_from_csel (aarch64_inst *inst)
2198 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2199 && (inst->operands[3].cond->value & 0xe) != 0xe)
2201 copy_operand_info (inst, 2, 3);
2202 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2203 inst->operands[3].type = AARCH64_OPND_NIL;
2209 /* CSET <Wd>, <cond>
2211 CSINC <Wd>, WZR, WZR, invert(<cond>)
2212 where <cond> is not AL or NV. */
2215 convert_csinc_to_cset (aarch64_inst *inst)
2217 if (inst->operands[1].reg.regno == 0x1f
2218 && inst->operands[2].reg.regno == 0x1f
2219 && (inst->operands[3].cond->value & 0xe) != 0xe)
2221 copy_operand_info (inst, 1, 3);
2222 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2223 inst->operands[3].type = AARCH64_OPND_NIL;
2224 inst->operands[2].type = AARCH64_OPND_NIL;
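
/* The CINC and CSET conversions above both rely on inverting the encoded
   condition.  AArch64 condition codes 0x0-0xd form inverse pairs that differ
   only in bit 0 (EQ/NE, CS/CC, ..., GT/LE), which is why a single XOR is
   enough and why AL (0xe) and NV (0xf) are excluded by the
   "(cond->value & 0xe) != 0xe" tests.  The helper below is an illustrative
   restatement of that rule, not the get_inverted_cond called above.  */

static unsigned int ATTRIBUTE_UNUSED
inverted_cond_value_sketch (unsigned int cond_value)
{
  /* The caller must already have rejected AL and NV.  */
  return cond_value ^ 0x1;
}
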
2232 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2234 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2235 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2236 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2237 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2238 machine-instruction mnemonic must be used. */
2241 convert_movewide_to_mov (aarch64_inst *inst)
2243 uint64_t value = inst->operands[1].imm.value;
2244 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2245 if (value == 0 && inst->operands[1].shifter.amount != 0)
2247 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2248 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2249 value <<= inst->operands[1].shifter.amount;
2250 /* As this is an alias converter, it has to be clear that INST->OPCODE
2251 is the opcode of the real instruction. */
2252 if (inst->opcode->op == OP_MOVN)
2254 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2256 /* A MOVN has an immediate that could be encoded by MOVZ. */
2257 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
2260 inst->operands[1].imm.value = value;
2261 inst->operands[1].shifter.amount = 0;
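
/* A worked sketch of the move-wide conversion above, separate from the real
   converter; the helper name and parameters are illustrative only.  For MOVZ
   the MOV value is simply imm16 << shift; for MOVN it is the bitwise NOT of
   that, truncated to 32 bits for the W form.  E.g. MOVN W0, #0x1234, LSL #16
   gives ~(0x12340000) & 0xffffffff == 0xedcbffff, which is the value the MOV
   alias displays (provided it is not also representable by a MOVZ).  */

static uint64_t ATTRIBUTE_UNUSED
movewide_mov_value_sketch (uint64_t imm16, unsigned int shift,
                           int is_movn, int is32)
{
  uint64_t value = imm16 << shift;

  if (is_movn)
    {
      value = ~value;
      if (is32)
        value &= 0xffffffffULL;
    }
  return value;
}
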
2267 ORR <Wd>, WZR, #<imm>.
2269 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2270 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2271 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2272 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2273 machine-instruction mnemonic must be used. */
2276 convert_movebitmask_to_mov (aarch64_inst *inst)
2281 /* Should have been assured by the base opcode value. */
2282 assert (inst->operands[1].reg.regno == 0x1f);
2283 copy_operand_info (inst, 1, 2);
2284 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2285 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2286 value = inst->operands[1].imm.value;
2287 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2289 if (inst->operands[0].reg.regno != 0x1f
2290 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
2291 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
2294 inst->operands[2].type = AARCH64_OPND_NIL;
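
/* A worked example of the rule above: ORR W0, WZR, #0x3f keeps the ORR
   mnemonic, because 0x3f is also a MOVZ-encodable wide constant, whereas
   ORR W0, WZR, #0x55555555 is shown as MOV W0, #0x55555555, because that
   bitmask immediate cannot be produced by MOVZ or MOVN.  The predicate below
   restates only that immediate test, for illustration; it deliberately
   ignores the SP-destination check performed above.  */

static bfd_boolean ATTRIBUTE_UNUSED
bitmask_mov_preferred_sketch (uint64_t value, int is32)
{
  if (aarch64_wide_constant_p (value, is32, NULL) == TRUE
      || aarch64_wide_constant_p (~value, is32, NULL) == TRUE)
    return FALSE;
  return TRUE;
}
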
2298 /* Some alias opcodes are disassembled by being converted from their real form.
2299 N.B. INST->OPCODE is the real opcode rather than the alias. */
2302 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2308 return convert_bfm_to_sr (inst);
2310 return convert_ubfm_to_lsl (inst);
2314 return convert_from_csel (inst);
2317 return convert_csinc_to_cset (inst);
2321 return convert_bfm_to_bfx (inst);
2325 return convert_bfm_to_bfi (inst);
2327 return convert_bfm_to_bfc (inst);
2329 return convert_orr_to_mov (inst);
2330 case OP_MOV_IMM_WIDE:
2331 case OP_MOV_IMM_WIDEN:
2332 return convert_movewide_to_mov (inst);
2333 case OP_MOV_IMM_LOG:
2334 return convert_movebitmask_to_mov (inst);
2336 return convert_extr_to_ror (inst);
2341 return convert_shll_to_xtl (inst);
2347 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2348 aarch64_inst *, int);
2350 /* Given the instruction information in *INST, check if the instruction has
2351 any alias form that can be used to represent *INST. If the answer is yes,
2352 update *INST to be in the form of the determined alias. */
2354 /* In the opcode description table, the following flags are used in opcode
2355 entries to help establish the relations between the real and alias opcodes:
2357 F_ALIAS: opcode is an alias
2358 F_HAS_ALIAS: opcode has alias(es)
2361 F_P3: Disassembly preference priority 1-3 (the larger the number,
2362 the higher the priority). If nothing is specified, the priority
2363 defaults to 0, i.e. the lowest priority.
2365 Although the relation between the machine and the alias instructions is not
2366 explicitly described, it can be easily determined from the base opcode
2367 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2368 description entries:
2370 The mask of an alias opcode must be equal to or a super-set (i.e. more
2371 constrained) of that of the aliased opcode; so is the base opcode value.
2373 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2374 && (opcode->mask & real->mask) == real->mask
2375 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2376 then OPCODE is an alias of, and only of, the REAL instruction
2378 The alias relationship is kept flat (non-hierarchical) to keep the related
2379 algorithm simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2381 During disassembly, the decoding decision tree (in
2382 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2383 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2384 not specified), the disassembler will check whether any alias instruction
2385 exists for this real instruction. If there is one, the disassembler will
2386 try to disassemble the 32-bit word again using the alias's rule, or try to
2387 convert the IR to the form of the alias. In the case of multiple aliases,
2388 the aliases are tried one by one from the highest priority (currently the
2389 flag F_P3) to the lowest priority (no priority flag), and the first one
2390 that succeeds is adopted.
2392 You may ask why the IR needs to be converted from one form to another when
2393 handling certain aliases. On the one hand, this avoids adding more operand
2394 code to handle unusual encoding/decoding; on the other hand, during
2395 disassembly the conversion is an effective way to check the conditions under
2396 which an alias may be adopted (an alias is used only if certain conditions
2397 are met).
2399 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2400 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2401 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
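
/* The relation described above can be restated as a stand-alone predicate.
   This helper is illustrative only; the disassembler itself relies on the
   generated aarch64_find_alias_opcode/aarch64_find_next_alias_opcode tables
   rather than scanning for matches like this.  */

static bfd_boolean ATTRIBUTE_UNUSED
opcode_aliases_sketch (const aarch64_opcode *real, const aarch64_opcode *alias)
{
  /* The alias mask must cover (be a superset of) the real mask, and the two
     base opcode values must agree on every bit the real mask cares about.  */
  if (opcode_has_alias (real)
      && alias_opcode_p (alias)
      && (alias->mask & real->mask) == real->mask
      && (real->mask & alias->opcode) == (real->mask & real->opcode))
    return TRUE;
  return FALSE;
}
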
2404 determine_disassembling_preference (struct aarch64_inst *inst)
2406 const aarch64_opcode *opcode;
2407 const aarch64_opcode *alias;
2409 opcode = inst->opcode;
2411 /* This opcode does not have an alias, so use itself. */
2412 if (opcode_has_alias (opcode) == FALSE)
2415 alias = aarch64_find_alias_opcode (opcode);
2418 #ifdef DEBUG_AARCH64
2421 const aarch64_opcode *tmp = alias;
2422 printf ("#### LIST ordered: ");
2425 printf ("%s, ", tmp->name);
2426 tmp = aarch64_find_next_alias_opcode (tmp);
2430 #endif /* DEBUG_AARCH64 */
2432 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2434 DEBUG_TRACE ("try %s", alias->name);
2435 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2437 /* An alias can be a pseudo opcode which will never be used in the
2438 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2440 if (pseudo_opcode_p (alias))
2442 DEBUG_TRACE ("skip pseudo %s", alias->name);
2446 if ((inst->value & alias->mask) != alias->opcode)
2448 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2451 /* No need to do any complicated transformation on operands, if the alias
2452 opcode does not have any operand. */
2453 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2455 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2456 aarch64_replace_opcode (inst, alias);
2459 if (alias->flags & F_CONV)
2462 memcpy (&copy, inst, sizeof (aarch64_inst));
2463 /* ALIAS is the preference as long as the instruction can be
2464 successfully converted to the form of ALIAS. */
2465 if (convert_to_alias (&copy, alias) == 1)
2467 aarch64_replace_opcode (&copy, alias);
2468 assert (aarch64_match_operands_constraint (&copy, NULL));
2469 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2470 memcpy (inst, &copy, sizeof (aarch64_inst));
2476 /* Directly decode the alias opcode. */
2478 memset (&temp, '\0', sizeof (aarch64_inst));
2479 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2481 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2482 memcpy (inst, &temp, sizeof (aarch64_inst));
2489 /* Some instructions (including all SVE ones) use the instruction class
2490 to describe how a qualifiers_list index is represented in the instruction
2491 encoding. If INST is such an instruction, decode the appropriate fields
2492 and fill in the operand qualifiers accordingly. Return true if no
2493 problems are found. */
2496 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2501 switch (inst->opcode->iclass)
2504 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2508 i = extract_field (FLD_SVE_tsz, inst->value, 0);
2511 while ((i & 1) == 0)
2519 /* Pick the smallest applicable element size. */
2520 if ((inst->value & 0x20600) == 0x600)
2522 else if ((inst->value & 0x20400) == 0x400)
2524 else if ((inst->value & 0x20000) == 0)
2531 /* sve_misc instructions have only a single variant. */
2535 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2539 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2542 case sve_shift_pred:
2543 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2554 case sve_shift_unpred:
2555 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2559 variant = extract_field (FLD_size, inst->value, 0);
2565 variant = extract_field (FLD_size, inst->value, 0);
2569 i = extract_field (FLD_size, inst->value, 0);
2576 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2580 /* No mapping between instruction class and qualifiers. */
2584 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2585 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
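
/* The switch above reduces each instruction class to a single VARIANT index,
   which then selects one row of the opcode's qualifiers_list, as in the loop
   just above.  For the tsz-based SVE classes the index is the position of
   the lowest set bit of the tsz field, so (assuming the qualifier rows are
   ordered from the smallest element size) 0bxxx1 selects the byte variant,
   0bxx10 halfword, 0bx100 word and 0b1000 doubleword.  The helper below is
   an illustrative restatement of that bit scan only; it simply returns 0 for
   a tsz of zero, which does not encode a valid element size.  */

static int ATTRIBUTE_UNUSED
sve_tsz_variant_sketch (unsigned int tsz)
{
  int variant = 0;

  while (tsz != 0 && (tsz & 1) == 0)
    {
      tsz >>= 1;
      variant += 1;
    }
  return variant;
}
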
2588 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2589 fails, which means that CODE is not an instruction of OPCODE; otherwise
2592 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2593 determined and used to disassemble CODE; this is done just before the
2597 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2598 aarch64_inst *inst, int noaliases_p)
2602 DEBUG_TRACE ("enter with %s", opcode->name);
2604 assert (opcode && inst);
2606 /* Check the base opcode. */
2607 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2609 DEBUG_TRACE ("base opcode match FAIL");
2614 memset (inst, '\0', sizeof (aarch64_inst));
2616 inst->opcode = opcode;
2619 /* Assign operand codes and indexes. */
2620 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2622 if (opcode->operands[i] == AARCH64_OPND_NIL)
2624 inst->operands[i].type = opcode->operands[i];
2625 inst->operands[i].idx = i;
2628 /* Call the opcode decoder indicated by flags. */
2629 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2631 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2635 /* Possibly use the instruction class to determine the correct
2637 if (!aarch64_decode_variant_using_iclass (inst))
2639 DEBUG_TRACE ("iclass-based decoder FAIL");
2643 /* Call operand decoders. */
2644 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2646 const aarch64_operand *opnd;
2647 enum aarch64_opnd type;
2649 type = opcode->operands[i];
2650 if (type == AARCH64_OPND_NIL)
2652 opnd = &aarch64_operands[type];
2653 if (operand_has_extractor (opnd)
2654 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2656 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2661 /* If the opcode has a verifier, then check it now. */
2662 if (opcode->verifier && ! opcode->verifier (opcode, code))
2664 DEBUG_TRACE ("operand verifier FAIL");
2668 /* Match the qualifiers. */
2669 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2671 /* Arriving here, CODE has been determined to be a valid instruction
2672 of OPCODE and *INST has been filled with information of this OPCODE
2673 instruction. Before the return, check if the instruction has any
2674 alias and should be disassembled in the form of its alias instead.
2675 If the answer is yes, *INST will be updated. */
2677 determine_disassembling_preference (inst);
2678 DEBUG_TRACE ("SUCCESS");
2683 DEBUG_TRACE ("constraint matching FAIL");
2690 /* This does some user-friendly fix-up to *INST. It currently focuses on
2691 adjusting qualifiers to help the printed instruction be recognized and
2692 understood more easily. */
2695 user_friendly_fixup (aarch64_inst *inst)
2697 switch (inst->opcode->iclass)
2700 /* TBNZ Xn|Wn, #uimm6, label
2701 Test and Branch Not Zero: conditionally jumps to label if bit number
2702 uimm6 in register Xn is not zero. The bit number implies the width of
2703 the register, which may be written and should be disassembled as Wn if
2704 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2706 if (inst->operands[1].imm.value < 32)
2707 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2713 /* Decode INSN and fill *INST with the instruction information. An alias
2714 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2718 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2719 bfd_boolean noaliases_p)
2721 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2723 #ifdef DEBUG_AARCH64
2726 const aarch64_opcode *tmp = opcode;
2728 DEBUG_TRACE ("opcode lookup:");
2731 aarch64_verbose (" %s", tmp->name);
2732 tmp = aarch64_find_next_opcode (tmp);
2735 #endif /* DEBUG_AARCH64 */
2737 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2738 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2739 opcode field and value; they differ only in that one of them has an
2740 extra field as part of the opcode, while that field is used for operand
2741 encoding in the other opcode(s) ('immh' in the case of this example). */
2742 while (opcode != NULL)
2744 /* But only one opcode can be decoded successfully, as the
2745 decoding routine checks the constraints carefully. */
2746 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2748 opcode = aarch64_find_next_opcode (opcode);
2754 /* Print operands. */
2757 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2758 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2760 int i, pcrel_p, num_printed;
2761 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2764 /* We rely mainly on the opcode operand info; however, we also look into
2765 the inst->operands to support disassembling the optional operand.
2767 The two operand codes should be the same in all cases, apart from
2768 when the operand can be optional. */
2769 if (opcode->operands[i] == AARCH64_OPND_NIL
2770 || opnds[i].type == AARCH64_OPND_NIL)
2773 /* Generate the operand string in STR. */
2774 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2777 /* Print the delimiter (taking account of omitted operand(s)). */
2779 (*info->fprintf_func) (info->stream, "%s",
2780 num_printed++ == 0 ? "\t" : ", ");
2782 /* Print the operand. */
2784 (*info->print_address_func) (info->target, info);
2786 (*info->fprintf_func) (info->stream, "%s", str);
2790 /* Print the instruction mnemonic name. */
2793 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2795 if (inst->opcode->flags & F_COND)
2797 /* For instructions that are truly conditionally executed, e.g. b.cond,
2798 prepare the full mnemonic name with the corresponding condition
2803 ptr = strchr (inst->opcode->name, '.');
2804 assert (ptr && inst->cond);
2805 len = ptr - inst->opcode->name;
2807 strncpy (name, inst->opcode->name, len);
2809 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2812 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2815 /* Print the instruction according to *INST. */
2818 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2819 struct disassemble_info *info)
2821 print_mnemonic_name (inst, info);
2822 print_operands (pc, inst->opcode, inst->operands, info);
2825 /* Entry-point of the instruction disassembler and printer. */
2828 print_insn_aarch64_word (bfd_vma pc,
2830 struct disassemble_info *info)
2832 static const char *err_msg[6] =
2835 [-ERR_UND] = "undefined",
2836 [-ERR_UNP] = "unpredictable",
2843 info->insn_info_valid = 1;
2844 info->branch_delay_insns = 0;
2845 info->data_size = 0;
2849 if (info->flags & INSN_HAS_RELOC)
2850 /* If the instruction has a reloc associated with it, then
2851 the offset field in the instruction will actually be the
2852 addend for the reloc. (If we are using REL type relocs).
2853 In such cases, we can ignore the pc when computing
2854 addresses, since the addend is not currently pc-relative. */
2857 ret = aarch64_decode_insn (word, &inst, no_aliases);
2859 if (((word >> 21) & 0x3ff) == 1)
2861 /* RESERVED for ALES. */
2862 assert (ret != ERR_OK);
2871 /* Handle undefined instructions. */
2872 info->insn_type = dis_noninsn;
2873 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2874 word, err_msg[-ret]);
2877 user_friendly_fixup (&inst);
2878 print_aarch64_insn (pc, &inst, info);
2885 /* Disallow mapping symbols ($x, $d etc) from
2886 being displayed in symbol relative addresses. */
2889 aarch64_symbol_is_valid (asymbol * sym,
2890 struct disassemble_info * info ATTRIBUTE_UNUSED)
2897 name = bfd_asymbol_name (sym);
2901 || (name[1] != 'x' && name[1] != 'd')
2902 || (name[2] != '\0' && name[2] != '.'));
2905 /* Print data bytes on INFO->STREAM. */
2908 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2910 struct disassemble_info *info)
2912 switch (info->bytes_per_chunk)
2915 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2918 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2921 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2928 /* Try to infer the code or data type from a symbol.
2929 Returns nonzero if *MAP_TYPE was set. */
2932 get_sym_code_type (struct disassemble_info *info, int n,
2933 enum map_type *map_type)
2935 elf_symbol_type *es;
2939 es = *(elf_symbol_type **)(info->symtab + n);
2940 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2942 /* If the symbol has function type then use that. */
2943 if (type == STT_FUNC)
2945 *map_type = MAP_INSN;
2949 /* Check for mapping symbols. */
2950 name = bfd_asymbol_name(info->symtab[n]);
2952 && (name[1] == 'x' || name[1] == 'd')
2953 && (name[2] == '\0' || name[2] == '.'))
2955 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2962 /* Entry-point of the AArch64 disassembler. */
2965 print_insn_aarch64 (bfd_vma pc,
2966 struct disassemble_info *info)
2968 bfd_byte buffer[INSNLEN];
2970 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2971 bfd_boolean found = FALSE;
2972 unsigned int size = 4;
2975 if (info->disassembler_options)
2977 set_default_aarch64_dis_options (info);
2979 parse_aarch64_dis_options (info->disassembler_options);
2981 /* To avoid repeated parsing of these options, we remove them here. */
2982 info->disassembler_options = NULL;
2985 /* AArch64 instructions are always little-endian. */
2986 info->endian_code = BFD_ENDIAN_LITTLE;
2988 /* First check the full symtab for a mapping symbol, even if there
2989 are no usable non-mapping symbols for this address. */
2990 if (info->symtab_size != 0
2991 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2993 enum map_type type = MAP_INSN;
2998 if (pc <= last_mapping_addr)
2999 last_mapping_sym = -1;
3001 /* Start scanning at the start of the function, or wherever
3002 we finished last time. */
3003 n = info->symtab_pos + 1;
3004 if (n < last_mapping_sym)
3005 n = last_mapping_sym;
3007 /* Scan up to the location being disassembled. */
3008 for (; n < info->symtab_size; n++)
3010 addr = bfd_asymbol_value (info->symtab[n]);
3013 if ((info->section == NULL
3014 || info->section == info->symtab[n]->section)
3015 && get_sym_code_type (info, n, &type))
3024 n = info->symtab_pos;
3025 if (n < last_mapping_sym)
3026 n = last_mapping_sym;
3028 /* No mapping symbol found at this address. Look backwards
3029 for a preceding one. */
3032 if (get_sym_code_type (info, n, &type))
3041 last_mapping_sym = last_sym;
3044 /* Look a little bit ahead to see if we should print out
3045 less than four bytes of data. If there's a symbol,
3046 mapping or otherwise, after two bytes then don't
3048 if (last_type == MAP_DATA)
3050 size = 4 - (pc & 3);
3051 for (n = last_sym + 1; n < info->symtab_size; n++)
3053 addr = bfd_asymbol_value (info->symtab[n]);
3056 if (addr - pc < size)
3061 /* If the next symbol is after three bytes, we need to
3062 print only part of the data, so that we can use either
3065 size = (pc & 1) ? 1 : 2;
3069 if (last_type == MAP_DATA)
3071 /* size was set above. */
3072 info->bytes_per_chunk = size;
3073 info->display_endian = info->endian;
3074 printer = print_insn_data;
3078 info->bytes_per_chunk = size = INSNLEN;
3079 info->display_endian = info->endian_code;
3080 printer = print_insn_aarch64_word;
3083 status = (*info->read_memory_func) (pc, buffer, size, info);
3086 (*info->memory_error_func) (status, pc, info);
3090 data = bfd_get_bits (buffer, size * 8,
3091 info->display_endian == BFD_ENDIAN_BIG);
3093 (*printer) (pc, data, info);
3099 print_aarch64_disassembler_options (FILE *stream)
3101 fprintf (stream, _("\n\
3102 The following AARCH64 specific disassembler options are supported for use\n\
3103 with the -M switch (multiple options should be separated by commas):\n"));
3105 fprintf (stream, _("\n\
3106 no-aliases Don't print instruction aliases.\n"));
3108 fprintf (stream, _("\n\
3109 aliases Do print instruction aliases.\n"));
3111 #ifdef DEBUG_AARCH64
3112 fprintf (stream, _("\n\
3113 debug_dump Temp switch for debug trace.\n"));
3114 #endif /* DEBUG_AARCH64 */
3116 fprintf (stream, _("\n"));
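
/* For example, these options reach parse_aarch64_dis_options above via
   objdump's -M switch:

     objdump -d -M no-aliases prog.o

   which disassembles every instruction in its machine form instead of as an
   alias such as MOV or LSL.  */
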