1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
/* Cache of the most recently used mapping symbol, so successive
   instructions need not rescan the symbol table.  -1 in
   last_mapping_sym means no mapping symbol has been cached yet.  */
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
/* Disassembler option flag: when non-zero, alias mnemonics are
   suppressed and the most general instruction form is printed.  */
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
/* Reset the AArch64 disassembler options to their defaults.  INFO is
   currently unused.  */
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
/* Parse a single disassembler option beginning at OPTION; LEN is the
   option's length in bytes (currently unused).  Unknown options produce
   a warning on stderr but do not abort disassembly.  */
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 /* Try to match options that are simple flags */
/* N.B. CONST_STRNEQ matches a prefix, so "no-aliases" must be tested
   before "aliases", which would otherwise match its tail.  */
60 if (CONST_STRNEQ (option, "no-aliases"))
66 if (CONST_STRNEQ (option, "aliases"))
73 if (CONST_STRNEQ (option, "debug_dump"))
78 #endif /* DEBUG_AARCH64 */
/* Unrecognised option: warn and carry on.  */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
/* Split the comma-separated OPTIONS string into individual options and
   feed each one to parse_aarch64_dis_option.  */
85 parse_aarch64_dis_options (const char *options)
87 const char *option_end;
92 while (*options != '\0')
94 /* Skip empty options. */
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
/* Advance to the terminating comma or NUL of this option.  */
103 while (*option_end != ',' && *option_end != '\0')
106 parse_aarch64_dis_option (options, option_end - options);
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
114 /* Functions doing the instruction disassembling. */
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes the first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
124 the order of H, L, M. */
/* Variadic extractor: the first variadic argument is the number of
   fields, followed by that many enum aarch64_field_kind values, most
   significant field first (see the ordering note above).  Returns the
   concatenated value extracted from CODE under MASK.  */
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
/* Number of fields that follow in the argument list.  */
135 num = va_arg (va, uint32_t);
137 aarch64_insn value = 0x0;
/* For each field: shift the accumulated value left by the field's
   width, then OR in the bits extracted from CODE, so earlier
   (more significant) fields end up in the higher bit positions.  */
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
156 enum aarch64_field_kind kind;
/* Walk SELF->fields until FLD_NIL or the array end; each extracted
   field is appended below the previous ones, so the final field
   supplies the least significant bits (see the comment above).  */
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
/* Work in a 32-bit unsigned copy so the bit manipulation is well
   defined; the result is reinterpreted as signed on return.  */
172 uint32_t ret = value;
/* If the designated sign bit (bit I) is set, fill every bit above it
   with ones to produce the two's-complement extension.  */
175 if ((value >> i) & 0x1)
177 uint32_t val = (uint32_t)(-1) << i;
180 return (int32_t) ret;
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
/* Offset VALUE from QLF_W; relies on the enumerator ordering noted
   above, and is sanity-checked against the standard value below.  */
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
/* Offset VALUE from QLF_V_8B; relies on enumerator ordering.  */
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
204 /* Instructions using vector type 2H should not call this function. Skip over
/* Step past the 2H enumerator so the later qualifiers stay aligned
   with their encoded values.  */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
/* Offset VALUE from QLF_S_B; relies on enumerator ordering and is
   sanity-checked against the standard value below.  */
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
225 /* Given the instruction in *INST which is probably half way through the
226 decoding and our caller wants to know the expected qualifier for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
/* Ask the qualifier matcher for the best-fitting sequence given what
   has been decoded so far; on success, return its entry for operand I,
   otherwise fall through to QLF_NIL.  */
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
238 return qualifiers[i];
240 return AARCH64_OPND_QLF_NIL;
243 /* Operand extractors. */
/* Extract a plain register number from the operand's first field.  */
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
250 info->reg.regno = extract_field (self->fields[0], code, 0);
/* Extract the second register of a register pair: it is not encoded in
   CODE but is always the register following operand 0's register.  */
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
259 assert (info->idx == 1
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
265 /* e.g. IC <ic_op>{, <Xt>}. */
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
271 info->reg.regno = extract_field (self->fields[0], code, 0);
/* This extractor is only valid for the <Xt> of a system instruction,
   i.e. operand 1 following a SYSTEM-class operand 0.  */
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
/* Mark <Xt> as present only when this system op actually takes it.  */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
/* Extract a vector register with lane index; how the index and element
   type are encoded depends on the instruction class.  */
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
290 info->reglane.regno = extract_field (self->fields[0], code,
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
/* imm4 holds index2 scaled by the element size; undo the scaling.  */
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
/* The position of the lowest set bit in imm5 selects the element
   size; the remaining high bits form the index.  */
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
328 else if (inst->opcode->iclass == dotproduct)
330 /* Need information in other operand(s) to help decoding. */
331 info->qualifier = get_expected_qualifier (inst, info->idx);
332 switch (info->qualifier)
334 case AARCH64_OPND_QLF_S_B:
/* Dot-product: index in H:L, register number limited to V0-V31.  */
336 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
337 info->reglane.regno &= 0x1f;
345 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
346 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
348 /* Need information in other operand(s) to help decoding. */
349 info->qualifier = get_expected_qualifier (inst, info->idx);
350 switch (info->qualifier)
352 case AARCH64_OPND_QLF_S_H:
/* H element: index in H:L:M; only V0-V15 are addressable since M
   doubles as the top bit of the register number.  */
354 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
356 info->reglane.regno &= 0xf;
358 case AARCH64_OPND_QLF_S_S:
/* S element: index in H:L.  */
360 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
362 case AARCH64_OPND_QLF_S_D:
/* D element: index in H alone.  */
364 info->reglane.index = extract_field (FLD_H, code, 0);
370 if (inst->opcode->op == OP_FCMLA_ELEM)
372 /* Complex operand takes two elements. */
/* FCMLA (by element): a complex pair occupies two lanes, so the raw
   index must be even; halve it to get the pair index.  */
373 if (info->reglane.index & 1)
375 info->reglane.index /= 2;
/* Extract a register list: first register from the operand's first
   field, count from the "len" field (which encodes count - 1).  */
383 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
384 const aarch64_insn code,
385 const aarch64_inst *inst ATTRIBUTE_UNUSED)
388 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
390 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
394 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
396 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
397 aarch64_opnd_info *info, const aarch64_insn code,
398 const aarch64_inst *inst)
401 /* Number of elements in each structure to be loaded/stored. */
402 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
406 unsigned is_reserved;
408 unsigned num_elements;
424 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
/* The opcode field selects an entry in the lookup table above, giving
   register count / element count / reserved status.  */
426 value = extract_field (FLD_opcode, code, 0);
427 /* PR 21595: Check for a bogus value. */
428 if (value >= ARRAY_SIZE (data))
/* Reject encodings whose element count disagrees with the opcode's
   expectation, or that are architecturally reserved.  */
430 if (expected_num != data[value].num_elements || data[value].is_reserved)
432 info->reglist.num_regs = data[value].num_regs;
437 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
438 lanes instructions. */
440 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst)
447 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
/* The S bit distinguishes e.g. LD1R (one register) from LD2R (two).  */
449 value = extract_field (FLD_S, code, 0);
451 /* Number of registers is equal to the number of elements in
452 each structure to be loaded/stored. */
453 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
454 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
456 /* Except when it is LD1R. */
/* One-register form with S set actually names two registers.  */
457 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
458 info->reglist.num_regs = 2;
463 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
464 load/store single element instructions. */
466 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
467 aarch64_opnd_info *info, const aarch64_insn code,
468 const aarch64_inst *inst ATTRIBUTE_UNUSED)
470 aarch64_field field = {0, 0};
471 aarch64_insn QSsize; /* fields Q:S:size. */
472 aarch64_insn opcodeh2; /* opcode<2:1> */
475 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
477 /* Decode the index, opcode<2:1> and size. */
478 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
479 opcodeh2 = extract_field_2 (&field, code, 0);
/* Concatenate Q:S:size; how many of those bits form the lane index
   depends on the element size selected by opcode<2:1> below.  */
480 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
/* Byte element: all four Q:S:size bits are the index.  */
484 info->qualifier = AARCH64_OPND_QLF_S_B;
485 /* Index encoded in "Q:S:size". */
486 info->reglist.index = QSsize;
/* Halfword element: size<0> is dropped from the index.  */
492 info->qualifier = AARCH64_OPND_QLF_S_H;
493 /* Index encoded in "Q:S:size<1>". */
494 info->reglist.index = QSsize >> 1;
/* size<1> distinguishes word (0) from doubleword (1) forms.  */
497 if ((QSsize >> 1) & 0x1)
500 if ((QSsize & 0x1) == 0)
502 info->qualifier = AARCH64_OPND_QLF_S_S;
503 /* Index encoded in "Q:S". */
504 info->reglist.index = QSsize >> 2;
/* Doubleword form requires S == 0; anything else is undefined.  */
508 if (extract_field (FLD_S, code, 0))
511 info->qualifier = AARCH64_OPND_QLF_S_D;
512 /* Index encoded in "Q". */
513 info->reglist.index = QSsize >> 3;
520 info->reglist.has_index = 1;
521 info->reglist.num_regs = 0;
522 /* Number of registers is equal to the number of elements in
523 each structure to be loaded/stored. */
524 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
525 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
530 /* Decode fields immh:immb and/or Q for e.g.
531 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
532 or SSHR <V><d>, <V><n>, #<shift>. */
535 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
536 aarch64_opnd_info *info, const aarch64_insn code,
537 const aarch64_inst *inst)
540 aarch64_insn Q, imm, immh;
541 enum aarch64_insn_class iclass = inst->opcode->iclass;
543 immh = extract_field (FLD_immh, code, 0);
/* Raw shift payload is the concatenation immh:immb.  */
546 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
548 /* Get highest set bit in immh. */
/* POS ends up as the index of immh's highest set bit, which selects
   the element size (see the encoding tables in the comments below).  */
549 while (--pos >= 0 && (immh & 0x8) == 0)
552 assert ((iclass == asimdshf || iclass == asisdshf)
553 && (info->type == AARCH64_OPND_IMM_VLSR
554 || info->type == AARCH64_OPND_IMM_VLSL))
556 if (iclass == asimdshf)
/* Vector form: Q combines with the element size to pick the vector
   arrangement qualifier.  */
558 Q = extract_field (FLD_Q, code, 0);
560 0000 x SEE AdvSIMD modified immediate
570 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
/* Scalar form: element size alone picks the scalar qualifier.  */
573 info->qualifier = get_sreg_qualifier_from_value (pos);
575 if (info->type == AARCH64_OPND_IMM_VLSR)
577 0000 SEE AdvSIMD modified immediate
578 0001 (16-UInt(immh:immb))
579 001x (32-UInt(immh:immb))
580 01xx (64-UInt(immh:immb))
581 1xxx (128-UInt(immh:immb)) */
582 info->imm.value = (16 << pos) - imm;
586 0000 SEE AdvSIMD modified immediate
587 0001 (UInt(immh:immb)-8)
588 001x (UInt(immh:immb)-16)
589 01xx (UInt(immh:immb)-32)
590 1xxx (UInt(immh:immb)-64) */
591 info->imm.value = imm - (8 << pos);
596 /* Decode shift immediate for e.g. sshr (imm). */
598 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
599 aarch64_opnd_info *info, const aarch64_insn code,
600 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* The size field selects the implicit shift amount: 8, 16 or 32.  */
604 val = extract_field (FLD_size, code, 0);
607 case 0: imm = 8; break;
608 case 1: imm = 16; break;
609 case 2: imm = 32; break;
612 info->imm.value = imm;
616 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
617 value in the field(s) will be extracted as unsigned immediate value. */
619 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
620 const aarch64_insn code,
621 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Gather the raw immediate from all of the operand's fields.  */
625 imm = extract_all_fields (self, code);
/* Sign-extend from the field width when the operand is signed.  */
627 if (operand_need_sign_extension (self))
628 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
/* Some immediates are stored pre-scaled by 4.  */
630 if (operand_need_shift_by_two (self))
/* ADRP immediates address 4KiB pages rather than bytes.  */
633 if (info->type == AARCH64_OPND_ADDR_ADRP)
636 info->imm.value = imm;
640 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
642 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
643 const aarch64_insn code,
644 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Base immediate first, then the LSL amount: hw field * 16.  */
646 aarch64_ext_imm (self, info, code, inst);
647 info->shifter.kind = AARCH64_MOD_LSL;
648 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
652 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
653 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
655 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
656 aarch64_opnd_info *info,
657 const aarch64_insn code,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED)
661 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
662 aarch64_field field = {0, 0};
664 assert (info->idx == 1);
/* FP immediates are flagged so the printer formats them as floats.  */
666 if (info->type == AARCH64_OPND_SIMD_FPIMM)
669 /* a:b:c:d:e:f:g:h */
670 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
671 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
673 /* Either MOVI <Dd>, #<imm>
674 or MOVI <Vd>.2D, #<imm>.
675 <imm> is a 64-bit immediate
676 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
677 encoded in "a:b:c:d:e:f:g:h". */
/* Expand each of the 8 bits into a full byte of 0x00 or 0xff.  */
679 unsigned abcdefgh = imm;
680 for (imm = 0ull, i = 0; i < 8; i++)
681 if (((abcdefgh >> i) & 0x1) != 0)
682 imm |= 0xffull << (8 * i);
684 info->imm.value = imm;
/* The expected qualifier tells us which shifter variant applies.  */
687 info->qualifier = get_expected_qualifier (inst, info->idx);
688 switch (info->qualifier)
690 case AARCH64_OPND_QLF_NIL:
692 info->shifter.kind = AARCH64_MOD_NONE;
694 case AARCH64_OPND_QLF_LSL:
696 info->shifter.kind = AARCH64_MOD_LSL;
/* The relevant cmode sub-field depends on the element size.  */
697 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
699 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
700 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
701 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
702 default: assert (0); return 0;
704 /* 00: 0; 01: 8; 10:16; 11:24. */
705 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
707 case AARCH64_OPND_QLF_MSL:
/* MSL ("shift ones in"): cmode<0> selects amount 8 or 16.  */
709 info->shifter.kind = AARCH64_MOD_MSL;
710 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
711 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
721 /* Decode an 8-bit floating-point immediate. */
/* Extract the raw 8-bit FP immediate encoding into imm.value; the
   printer is responsible for expanding it to its FP meaning.  */
723 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
724 const aarch64_insn code,
725 const aarch64_inst *inst ATTRIBUTE_UNUSED)
727 info->imm.value = extract_all_fields (self, code);
732 /* Decode a 1-bit rotate immediate (#90 or #270). */
734 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
735 const aarch64_insn code,
736 const aarch64_inst *inst ATTRIBUTE_UNUSED)
738 uint64_t rot = extract_field (self->fields[0], code, 0);
/* rot 0 -> 90 degrees, rot 1 -> 270 degrees.  */
740 info->imm.value = rot * 180 + 90;
744 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
746 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
747 const aarch64_insn code,
748 const aarch64_inst *inst ATTRIBUTE_UNUSED)
750 uint64_t rot = extract_field (self->fields[0], code, 0);
/* rot 0/1/2/3 -> 0/90/180/270 degrees.  */
752 info->imm.value = rot * 90;
756 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
758 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
759 aarch64_opnd_info *info, const aarch64_insn code,
760 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* #<fbits> is encoded as 64 minus the scale field.  */
762 info->imm.value = 64- extract_field (FLD_scale, code, 0);
766 /* Decode arithmetic immediate for e.g.
767 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
769 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
770 aarch64_opnd_info *info, const aarch64_insn code,
771 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Arithmetic immediates only allow an optional LSL #12.  */
775 info->shifter.kind = AARCH64_MOD_LSL;
777 value = extract_field (FLD_shift, code, 0);
780 info->shifter.amount = value ? 12 : 0;
781 /* imm12 (unsigned) */
782 info->imm.value = extract_field (FLD_imm12, code, 0);
787 /* Return true if VALUE is a valid logical immediate encoding, storing the
788 decoded value in *RESULT if so. ESIZE is the number of bytes in the
789 decoded immediate. */
791 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
797 /* value is N:immr:imms. */
/* R = rotation amount (immr), N = top bit of the size selector.  */
799 R = (value >> 6) & 0x3f;
800 N = (value >> 12) & 0x1;
802 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
803 (in other words, right rotated by R), then replicated. */
807 mask = 0xffffffffffffffffull;
/* N:imms together select the replication element size and how many
   bits of S are significant (cf. the DecodeBitMasks pseudocode).  */
813 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
814 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
815 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
816 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
817 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
820 mask = (1ull << simd_size) - 1;
821 /* Top bits are IGNORED. */
/* The element cannot be wider than the operation's element size.  */
825 if (simd_size > esize * 8)
828 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
829 if (S == simd_size - 1)
831 /* S+1 consecutive bits to 1. */
832 /* NOTE: S can't be 63 due to detection above. */
833 imm = (1ull << (S + 1)) - 1;
834 /* Rotate to the left by simd_size - R. */
836 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
837 /* Replicate the value according to SIMD size. */
/* Deliberate fall-through: each case doubles the pattern width.  */
840 case 2: imm = (imm << 2) | imm;
842 case 4: imm = (imm << 4) | imm;
844 case 8: imm = (imm << 8) | imm;
846 case 16: imm = (imm << 16) | imm;
848 case 32: imm = (imm << 32) | imm;
851 default: assert (0); return 0;
/* Truncate to ESIZE bytes; the shift is split in two so that the
   esize == 8 case shifts by 32+32 rather than an undefined 64.  */
854 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
859 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
861 aarch64_ext_limm (const aarch64_operand *self,
862 aarch64_opnd_info *info, const aarch64_insn code,
863 const aarch64_inst *inst)
/* Gather N:immr:imms, then let decode_limm validate and expand it;
   the element size comes from operand 0's qualifier.  */
868 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
870 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
871 return decode_limm (esize, value, &info->imm.value);
874 /* Decode a logical immediate for the BIC alias of AND (etc.). */
876 aarch64_ext_inv_limm (const aarch64_operand *self,
877 aarch64_opnd_info *info, const aarch64_insn code,
878 const aarch64_inst *inst)
/* Decode as a normal logical immediate, then bitwise-invert it for
   the aliased form (e.g. BIC = AND with inverted immediate).  */
880 if (!aarch64_ext_limm (self, info, code, inst))
882 info->imm.value = ~info->imm.value;
886 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
887 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
889 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
890 aarch64_opnd_info *info,
891 const aarch64_insn code, const aarch64_inst *inst)
896 info->reg.regno = extract_field (FLD_Rt, code, 0);
/* The size field's interpretation depends on the instruction class.  */
899 value = extract_field (FLD_ldst_size, code, 0);
900 if (inst->opcode->iclass == ldstpair_indexed
901 || inst->opcode->iclass == ldstnapair_offs
902 || inst->opcode->iclass == ldstpair_off
903 || inst->opcode->iclass == loadlit)
905 enum aarch64_opnd_qualifier qualifier;
/* Pair/literal forms only support S/D/Q register widths.  */
908 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
909 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
910 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
913 info->qualifier = qualifier;
/* Other forms: opc1:size together select the B/H/S/D/Q width.  */
918 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
921 info->qualifier = get_sreg_qualifier_from_value (value);
927 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
/* Extract a base-register-only address: just Rn, no offset.  */
929 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
930 aarch64_opnd_info *info,
932 const aarch64_inst *inst ATTRIBUTE_UNUSED)
935 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
939 /* Decode the address operand for e.g.
940 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
942 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
943 aarch64_opnd_info *info,
944 aarch64_insn code, const aarch64_inst *inst)
946 aarch64_insn S, value;
949 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
951 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
/* The option field encodes the extend kind (UXTW/SXTW/SXTX/...).  */
953 value = extract_field (FLD_option, code, 0);
955 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
956 /* Fix-up the shifter kind; although the table-driven approach is
957 efficient, it is slightly inflexible, thus needing this fix-up. */
958 if (info->shifter.kind == AARCH64_MOD_UXTX)
959 info->shifter.kind = AARCH64_MOD_LSL;
/* S chooses between no shift amount and a data-size-derived one.  */
961 S = extract_field (FLD_S, code, 0)
964 info->shifter.amount = 0;
965 info->shifter.amount_present = 0;
970 /* Need information in other operand(s) to help achieve the decoding
972 info->qualifier = get_expected_qualifier (inst, info->idx);
973 /* Get the size of the data element that is accessed, which may be
974 different from that of the source register size, e.g. in strb/ldrb. */
975 size = aarch64_get_qualifier_esize (info->qualifier);
976 info->shifter.amount = get_logsz (size);
977 info->shifter.amount_present = 1;
983 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
985 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
986 aarch64_insn code, const aarch64_inst *inst)
989 info->qualifier = get_expected_qualifier (inst, info->idx);
992 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
993 /* simm (imm9 or imm7) */
994 imm = extract_field (self->fields[0], code, 0);
/* Sign-extend from the immediate field's actual width.  */
995 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
996 if (self->fields[0] == FLD_imm7)
997 /* scaled immediate in ld/st pair instructions. */
998 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
/* These classes never write back the base register.  */
1000 if (inst->opcode->iclass == ldst_unscaled
1001 || inst->opcode->iclass == ldstnapair_offs
1002 || inst->opcode->iclass == ldstpair_off
1003 || inst->opcode->iclass == ldst_unpriv)
1004 info->addr.writeback = 0;
1007 /* pre/post- index */
1008 info->addr.writeback = 1;
/* fields[1] distinguishes pre-index (1) from post-index.  */
1009 if (extract_field (self->fields[1], code, 0) == 1)
1010 info->addr.preind = 1;
1012 info->addr.postind = 1;
1018 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1020 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1022 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* uimm12 is stored scaled down by the access size; rescale it.  */
1025 info->qualifier = get_expected_qualifier (inst, info->idx);
1026 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1028 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1030 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1034 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1036 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1038 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1042 info->qualifier = get_expected_qualifier (inst, info->idx);
1044 info->addr.base_regno = extract_field (self->fields[0], code, 0);
/* The 10-bit signed offset is split across two fields and is stored
   scaled down by 8; sign-extend then rescale.  */
1046 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1047 info->addr.offset.imm = sign_extend (imm, 9) << 3;
/* fields[3] set means the pre-index (writeback) form.  */
1048 if (extract_field (self->fields[3], code, 0) == 1) {
1049 info->addr.writeback = 1;
1050 info->addr.preind = 1;
1055 /* Decode the address operand for e.g.
1056 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1058 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1059 aarch64_opnd_info *info,
1060 aarch64_insn code, const aarch64_inst *inst)
1062 /* The opcode dependent area stores the number of elements in
1063 each structure to be loaded/stored. */
1064 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1067 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1068 /* Rm | #<amount> */
1069 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
/* Rm == 31 encodes the immediate post-increment form; the amount is
   the total number of bytes transferred.  */
1070 if (info->addr.offset.regno == 31)
1072 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1073 /* Special handling of loading single structure to all lane. */
1074 info->addr.offset.imm = (is_ld1r ? 1
1075 : inst->operands[0].reglist.num_regs)
1076 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
/* General case: registers * element size * elements per register.  */
1078 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1079 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1080 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
/* Otherwise the post-increment comes from register Rm.  */
1083 info->addr.offset.is_reg = 1;
1084 info->addr.writeback = 1;
1089 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1091 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1092 aarch64_opnd_info *info,
1093 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Map the 4-bit cond field to its condition descriptor.  */
1097 value = extract_field (FLD_cond, code, 0);
1098 info->cond = get_cond_from_value (value);
1102 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1104 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1105 aarch64_opnd_info *info,
1107 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1109 /* op0:op1:CRn:CRm:op2 */
/* Store the concatenated system register encoding directly.  */
1110 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1115 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1117 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1118 aarch64_opnd_info *info, aarch64_insn code,
1119 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* op1:op2 names the PSTATE field; validate it against the table of
   known fields, rejecting reserved encodings.  */
1123 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1124 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1125 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1127 /* Reserved value in <pstatefield>. */
1131 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1133 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1134 aarch64_opnd_info *info,
1136 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1140 const aarch64_sys_ins_reg *sysins_ops;
1141 /* op0:op1:CRn:CRm:op2 */
1142 value = extract_fields (code, 0, 5,
1143 FLD_op0, FLD_op1, FLD_CRn,
/* Select the operation table matching the operand type (AT/DC/IC/TLBI).  */
1148 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1149 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1150 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1151 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1152 default: assert (0); return 0;
/* Linear search for the encoding in the selected table.  */
1155 for (i = 0; sysins_ops[i].name != NULL; ++i)
1156 if (sysins_ops[i].value == value)
1158 info->sysins_op = sysins_ops + i;
1159 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1160 info->sysins_op->name,
1161 (unsigned)info->sysins_op->value,
1162 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1169 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1172 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1173 aarch64_opnd_info *info,
1175 const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* CRm indexes directly into the barrier option table.  */
1178 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1182 /* Decode the prefetch operation option operand for e.g.
1183 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1186 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1187 aarch64_opnd_info *info,
1188 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
/* Rt indexes directly into the prefetch operation table.  */
1191 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1195 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1196 to the matching name/value pair in aarch64_hint_options. */
1199 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1200 aarch64_opnd_info *info,
1202 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1205 unsigned hint_number;
/* CRm:op2 forms the hint number; look it up in the options table.  */
1208 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1210 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1212 if (hint_number == aarch64_hint_options[i].value)
1214 info->hint_option = &(aarch64_hint_options[i]);
1222 /* Decode the extended register operand for e.g.
1223 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1225 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1226 aarch64_opnd_info *info,
1228 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1233 info->reg.regno = extract_field (FLD_Rm, code, 0);
/* option field selects the extend kind; imm3 is the shift amount.  */
1235 value = extract_field (FLD_option, code, 0);
1236 info->shifter.kind =
1237 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1239 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1241 /* This makes the constraint checking happy. */
1242 info->shifter.operator_present = 1;
1244 /* Assume inst->operands[0].qualifier has been resolved. */
1245 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
/* Default to a W register; only the 64-bit extends of a 64-bit
   destination make Rm an X register.  */
1246 info->qualifier = AARCH64_OPND_QLF_W;
1247 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1248 && (info->shifter.kind == AARCH64_MOD_UXTX
1249 || info->shifter.kind == AARCH64_MOD_SXTX))
1250 info->qualifier = AARCH64_OPND_QLF_X;
1255 /* Decode the shifted register operand for e.g.
1256 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1258 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1259 aarch64_opnd_info *info,
1261 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1266 info->reg.regno = extract_field (FLD_Rm, code, 0);
/* shift field selects LSL/LSR/ASR/ROR; imm6 is the amount.  */
1268 value = extract_field (FLD_shift, code, 0);
1269 info->shifter.kind =
1270 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
/* ROR is only legal for logical (shifted register) instructions.  */
1271 if (info->shifter.kind == AARCH64_MOD_ROR
1272 && inst->opcode->iclass != log_shift)
1273 /* ROR is not available for the shifted register operand in arithmetic
1277 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1279 /* This makes the constraint checking happy. */
1280 info->shifter.operator_present = 1;
1285 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1286 where <offset> is given by the OFFSET parameter and where <factor> is
1287 1 plus SELF's operand-dependent value. fields[0] specifies the field
1288 that holds <base>. */
1290 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1291 aarch64_opnd_info *info, aarch64_insn code,
1294 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1295 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1296 info->addr.offset.is_reg = FALSE;
1297 info->addr.writeback = FALSE;
1298 info->addr.preind = TRUE;
1300 info->shifter.kind = AARCH64_MOD_MUL_VL;
1301 info->shifter.amount = 1;
1302 info->shifter.operator_present = (info->addr.offset.imm != 0);
1303 info->shifter.amount_present = FALSE;
1307 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1308 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1309 SELF's operand-dependent value. fields[0] specifies the field that
1310 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1312 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1313 aarch64_opnd_info *info, aarch64_insn code,
1314 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1318 offset = extract_field (FLD_SVE_imm4, code, 0);
1319 offset = ((offset + 8) & 15) - 8;
1320 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1323 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1324 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1325 SELF's operand-dependent value. fields[0] specifies the field that
1326 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1328 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1329 aarch64_opnd_info *info, aarch64_insn code,
1330 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1334 offset = extract_field (FLD_SVE_imm6, code, 0);
1335 offset = (((offset + 32) & 63) - 32);
1336 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1339 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1340 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1341 SELF's operand-dependent value. fields[0] specifies the field that
1342 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1343 and imm3 fields, with imm3 being the less-significant part. */
1345 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1346 aarch64_opnd_info *info,
1348 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1352 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1353 offset = (((offset + 256) & 511) - 256);
1354 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1357 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1358 is given by the OFFSET parameter and where <shift> is SELF's operand-
1359 dependent value. fields[0] specifies the base register field <base>. */
1361 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1362 aarch64_opnd_info *info, aarch64_insn code,
1365 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1366 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1367 info->addr.offset.is_reg = FALSE;
1368 info->addr.writeback = FALSE;
1369 info->addr.preind = TRUE;
1370 info->shifter.operator_present = FALSE;
1371 info->shifter.amount_present = FALSE;
1375 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1376 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1377 value. fields[0] specifies the base register field. */
1379 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1380 aarch64_opnd_info *info, aarch64_insn code,
1381 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1383 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1384 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1387 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1388 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1389 value. fields[0] specifies the base register field. */
1391 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1392 aarch64_opnd_info *info, aarch64_insn code,
1393 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1395 int offset = extract_field (FLD_SVE_imm6, code, 0);
1396 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1399 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1400 is SELF's operand-dependent value. fields[0] specifies the base
1401 register field and fields[1] specifies the offset register field. */
1403 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1404 aarch64_opnd_info *info, aarch64_insn code,
1405 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1409 index_regno = extract_field (self->fields[1], code, 0);
1410 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1413 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1414 info->addr.offset.regno = index_regno;
1415 info->addr.offset.is_reg = TRUE;
1416 info->addr.writeback = FALSE;
1417 info->addr.preind = TRUE;
1418 info->shifter.kind = AARCH64_MOD_LSL;
1419 info->shifter.amount = get_operand_specific_data (self);
1420 info->shifter.operator_present = (info->shifter.amount != 0);
1421 info->shifter.amount_present = (info->shifter.amount != 0);
1425 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1426 <shift> is SELF's operand-dependent value. fields[0] specifies the
1427 base register field, fields[1] specifies the offset register field and
1428 fields[2] is a single-bit field that selects SXTW over UXTW. */
1430 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1431 aarch64_opnd_info *info, aarch64_insn code,
1432 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1434 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1435 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1436 info->addr.offset.is_reg = TRUE;
1437 info->addr.writeback = FALSE;
1438 info->addr.preind = TRUE;
1439 if (extract_field (self->fields[2], code, 0))
1440 info->shifter.kind = AARCH64_MOD_SXTW;
1442 info->shifter.kind = AARCH64_MOD_UXTW;
1443 info->shifter.amount = get_operand_specific_data (self);
1444 info->shifter.operator_present = TRUE;
1445 info->shifter.amount_present = (info->shifter.amount != 0);
1449 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1450 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1451 fields[0] specifies the base register field. */
1453 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1454 aarch64_opnd_info *info, aarch64_insn code,
1455 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1457 int offset = extract_field (FLD_imm5, code, 0);
1458 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1461 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1462 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1463 number. fields[0] specifies the base register field and fields[1]
1464 specifies the offset register field. */
1466 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1467 aarch64_insn code, enum aarch64_modifier_kind kind)
1469 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1470 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1471 info->addr.offset.is_reg = TRUE;
1472 info->addr.writeback = FALSE;
1473 info->addr.preind = TRUE;
1474 info->shifter.kind = kind;
1475 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1476 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1477 || info->shifter.amount != 0);
1478 info->shifter.amount_present = (info->shifter.amount != 0);
1482 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1483 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1484 field and fields[1] specifies the offset register field. */
1486 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1487 aarch64_opnd_info *info, aarch64_insn code,
1488 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1490 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1493 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1494 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1495 field and fields[1] specifies the offset register field. */
1497 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1498 aarch64_opnd_info *info, aarch64_insn code,
1499 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1501 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1504 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1505 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1506 field and fields[1] specifies the offset register field. */
1508 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1509 aarch64_opnd_info *info, aarch64_insn code,
1510 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1512 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1515 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1516 has the raw field value and that the low 8 bits decode to VALUE. */
1518 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1520 info->shifter.kind = AARCH64_MOD_LSL;
1521 info->shifter.amount = 0;
1522 if (info->imm.value & 0x100)
1525 /* Decode 0x100 as #0, LSL #8. */
1526 info->shifter.amount = 8;
1530 info->shifter.operator_present = (info->shifter.amount != 0);
1531 info->shifter.amount_present = (info->shifter.amount != 0);
1532 info->imm.value = value;
1536 /* Decode an SVE ADD/SUB immediate. */
1538 aarch64_ext_sve_aimm (const aarch64_operand *self,
1539 aarch64_opnd_info *info, const aarch64_insn code,
1540 const aarch64_inst *inst)
1542 return (aarch64_ext_imm (self, info, code, inst)
1543 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1546 /* Decode an SVE CPY/DUP immediate. */
1548 aarch64_ext_sve_asimm (const aarch64_operand *self,
1549 aarch64_opnd_info *info, const aarch64_insn code,
1550 const aarch64_inst *inst)
1552 return (aarch64_ext_imm (self, info, code, inst)
1553 && decode_sve_aimm (info, (int8_t) info->imm.value));
1556 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1557 The fields array specifies which field to use. */
1559 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1560 aarch64_opnd_info *info, aarch64_insn code,
1561 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1563 if (extract_field (self->fields[0], code, 0))
1564 info->imm.value = 0x3f800000;
1566 info->imm.value = 0x3f000000;
1567 info->imm.is_fp = TRUE;
1571 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1572 The fields array specifies which field to use. */
1574 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1575 aarch64_opnd_info *info, aarch64_insn code,
1576 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1578 if (extract_field (self->fields[0], code, 0))
1579 info->imm.value = 0x40000000;
1581 info->imm.value = 0x3f000000;
1582 info->imm.is_fp = TRUE;
1586 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1587 The fields array specifies which field to use. */
1589 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1590 aarch64_opnd_info *info, aarch64_insn code,
1591 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1593 if (extract_field (self->fields[0], code, 0))
1594 info->imm.value = 0x3f800000;
1596 info->imm.value = 0x0;
1597 info->imm.is_fp = TRUE;
1601 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1602 array specifies which field to use for Zn. MM is encoded in the
1603 concatenation of imm5 and SVE_tszh, with imm5 being the less
1604 significant part. */
1606 aarch64_ext_sve_index (const aarch64_operand *self,
1607 aarch64_opnd_info *info, aarch64_insn code,
1608 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1612 info->reglane.regno = extract_field (self->fields[0], code, 0);
1613 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1614 if ((val & 31) == 0)
1616 while ((val & 1) == 0)
1618 info->reglane.index = val / 2;
1622 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1624 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1625 aarch64_opnd_info *info, const aarch64_insn code,
1626 const aarch64_inst *inst)
1628 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1629 return (aarch64_ext_limm (self, info, code, inst)
1630 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1633 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1634 and where MM occupies the most-significant part. The operand-dependent
1635 value specifies the number of bits in Zn. */
1637 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1638 aarch64_opnd_info *info, aarch64_insn code,
1639 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1641 unsigned int reg_bits = get_operand_specific_data (self);
1642 unsigned int val = extract_all_fields (self, code);
1643 info->reglane.regno = val & ((1 << reg_bits) - 1);
1644 info->reglane.index = val >> reg_bits;
1648 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1649 to use for Zn. The opcode-dependent value specifies the number
1650 of registers in the list. */
1652 aarch64_ext_sve_reglist (const aarch64_operand *self,
1653 aarch64_opnd_info *info, aarch64_insn code,
1654 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1656 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1657 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1661 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1662 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1665 aarch64_ext_sve_scale (const aarch64_operand *self,
1666 aarch64_opnd_info *info, aarch64_insn code,
1667 const aarch64_inst *inst)
1671 if (!aarch64_ext_imm (self, info, code, inst))
1673 val = extract_field (FLD_SVE_imm4, code, 0);
1674 info->shifter.kind = AARCH64_MOD_MUL;
1675 info->shifter.amount = val + 1;
1676 info->shifter.operator_present = (val != 0);
1677 info->shifter.amount_present = (val != 0);
/* Return the top set bit in VALUE, which is expected to be relatively
   small.  Returns 0 for VALUE == 0.  */
static uint64_t
get_top_bit (uint64_t value)
{
  /* Repeatedly clear the lowest set bit until only one bit remains.  */
  while ((value & -value) != value)
    value -= value & -value;
  return value;
}
1691 /* Decode an SVE shift-left immediate. */
1693 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1694 aarch64_opnd_info *info, const aarch64_insn code,
1695 const aarch64_inst *inst)
1697 if (!aarch64_ext_imm (self, info, code, inst)
1698 || info->imm.value == 0)
1701 info->imm.value -= get_top_bit (info->imm.value);
1705 /* Decode an SVE shift-right immediate. */
1707 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1708 aarch64_opnd_info *info, const aarch64_insn code,
1709 const aarch64_inst *inst)
1711 if (!aarch64_ext_imm (self, info, code, inst)
1712 || info->imm.value == 0)
1715 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1719 /* Bitfields that are commonly used to encode certain operands' information
1720 may be partially used as part of the base opcode in some instructions.
1721 For example, the bit 1 of the field 'size' in
1722 FCVTXN <Vb><d>, <Va><n>
1723 is actually part of the base opcode, while only size<0> is available
1724 for encoding the register type. Another example is the AdvSIMD
1725 instruction ORR (register), in which the field 'size' is also used for
1726 the base opcode, leaving only the field 'Q' available to encode the
1727 vector register arrangement specifier '8B' or '16B'.
1729 This function tries to deduce the qualifier from the value of partially
1730 constrained field(s). Given the VALUE of such a field or fields, the
1731 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1732 operand encoding), the function returns the matching qualifier or
1733 AARCH64_OPND_QLF_NIL if nothing matches.
1735 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1736 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1737 may end with AARCH64_OPND_QLF_NIL. */
1739 static enum aarch64_opnd_qualifier
1740 get_qualifier_from_partial_encoding (aarch64_insn value,
1741 const enum aarch64_opnd_qualifier* \
1746 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1747 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1749 aarch64_insn standard_value;
1750 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1752 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1753 if ((standard_value & mask) == (value & mask))
1754 return candidates[i];
1756 return AARCH64_OPND_QLF_NIL;
1759 /* Given a list of qualifier sequences, return all possible valid qualifiers
1760 for operand IDX in QUALIFIERS.
1761 Assume QUALIFIERS is an array whose length is large enough. */
1764 get_operand_possible_qualifiers (int idx,
1765 const aarch64_opnd_qualifier_seq_t *list,
1766 enum aarch64_opnd_qualifier *qualifiers)
1769 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1770 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1774 /* Decode the size Q field for e.g. SHADD.
1775 We tag one operand with the qualifer according to the code;
1776 whether the qualifier is valid for this opcode or not, it is the
1777 duty of the semantic checking. */
1780 decode_sizeq (aarch64_inst *inst)
1783 enum aarch64_opnd_qualifier qualifier;
1785 aarch64_insn value, mask;
1786 enum aarch64_field_kind fld_sz;
1787 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1789 if (inst->opcode->iclass == asisdlse
1790 || inst->opcode->iclass == asisdlsep
1791 || inst->opcode->iclass == asisdlso
1792 || inst->opcode->iclass == asisdlsop)
1793 fld_sz = FLD_vldst_size;
1798 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1799 /* Obtain the info that which bits of fields Q and size are actually
1800 available for operand encoding. Opcodes like FMAXNM and FMLA have
1801 size[1] unavailable. */
1802 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1804 /* The index of the operand we are going to tag a qualifier and the qualifer
1805 itself are reasoned from the value of the size and Q fields and the
1806 possible valid qualifier lists. */
1807 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1808 DEBUG_TRACE ("key idx: %d", idx);
1810 /* For most related instruciton, size:Q are fully available for operand
1814 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1818 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1820 #ifdef DEBUG_AARCH64
1824 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1825 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1826 DEBUG_TRACE ("qualifier %d: %s", i,
1827 aarch64_get_qualifier_name(candidates[i]));
1828 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1830 #endif /* DEBUG_AARCH64 */
1832 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1834 if (qualifier == AARCH64_OPND_QLF_NIL)
1837 inst->operands[idx].qualifier = qualifier;
1841 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1842 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1845 decode_asimd_fcvt (aarch64_inst *inst)
1847 aarch64_field field = {0, 0};
1849 enum aarch64_opnd_qualifier qualifier;
1851 gen_sub_field (FLD_size, 0, 1, &field);
1852 value = extract_field_2 (&field, inst->value, 0);
1853 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1854 : AARCH64_OPND_QLF_V_2D;
1855 switch (inst->opcode->op)
1859 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1860 inst->operands[1].qualifier = qualifier;
1864 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1865 inst->operands[0].qualifier = qualifier;
1875 /* Decode size[0], i.e. bit 22, for
1876 e.g. FCVTXN <Vb><d>, <Va><n>. */
1879 decode_asisd_fcvtxn (aarch64_inst *inst)
1881 aarch64_field field = {0, 0};
1882 gen_sub_field (FLD_size, 0, 1, &field);
1883 if (!extract_field_2 (&field, inst->value, 0))
1885 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1889 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1891 decode_fcvt (aarch64_inst *inst)
1893 enum aarch64_opnd_qualifier qualifier;
1895 const aarch64_field field = {15, 2};
1898 value = extract_field_2 (&field, inst->value, 0);
1901 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1902 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1903 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1906 inst->operands[0].qualifier = qualifier;
1911 /* Do miscellaneous decodings that are not common enough to be driven by
1915 do_misc_decoding (aarch64_inst *inst)
1918 switch (inst->opcode->op)
1921 return decode_fcvt (inst);
1927 return decode_asimd_fcvt (inst);
1930 return decode_asisd_fcvtxn (inst);
1934 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1935 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1936 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1939 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1940 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1943 /* Index must be zero. */
1944 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1945 return value > 0 && value <= 16 && value == (value & -value);
1948 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1949 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1952 /* Index must be nonzero. */
1953 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1954 return value > 0 && value != (value & -value);
1957 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1958 == extract_field (FLD_SVE_Pm, inst->value, 0));
1960 case OP_MOVZS_P_P_P:
1962 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1963 == extract_field (FLD_SVE_Pm, inst->value, 0));
1965 case OP_NOTS_P_P_P_Z:
1966 case OP_NOT_P_P_P_Z:
1967 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1968 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1975 /* Opcodes that have fields shared by multiple operands are usually flagged
1976 with flags. In this function, we detect such flags, decode the related
1977 field(s) and store the information in one of the related operands. The
1978 'one' operand is not any operand but one of the operands that can
1979 accommadate all the information that has been decoded. */
1982 do_special_decoding (aarch64_inst *inst)
1986 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1987 if (inst->opcode->flags & F_COND)
1989 value = extract_field (FLD_cond2, inst->value, 0);
1990 inst->cond = get_cond_from_value (value);
1993 if (inst->opcode->flags & F_SF)
1995 idx = select_operand_for_sf_field_coding (inst->opcode);
1996 value = extract_field (FLD_sf, inst->value, 0);
1997 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1998 if ((inst->opcode->flags & F_N)
1999 && extract_field (FLD_N, inst->value, 0) != value)
2003 if (inst->opcode->flags & F_LSE_SZ)
2005 idx = select_operand_for_sf_field_coding (inst->opcode);
2006 value = extract_field (FLD_lse_sz, inst->value, 0);
2007 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2009 /* size:Q fields. */
2010 if (inst->opcode->flags & F_SIZEQ)
2011 return decode_sizeq (inst);
2013 if (inst->opcode->flags & F_FPTYPE)
2015 idx = select_operand_for_fptype_field_coding (inst->opcode);
2016 value = extract_field (FLD_type, inst->value, 0);
2019 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2020 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2021 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2026 if (inst->opcode->flags & F_SSIZE)
2028 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2029 of the base opcode. */
2031 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2032 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2033 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2034 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2035 /* For most related instruciton, the 'size' field is fully available for
2036 operand encoding. */
2038 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2041 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2043 inst->operands[idx].qualifier
2044 = get_qualifier_from_partial_encoding (value, candidates, mask);
2048 if (inst->opcode->flags & F_T)
2050 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2053 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2054 == AARCH64_OPND_CLASS_SIMD_REG);
2065 val = extract_field (FLD_imm5, inst->value, 0);
2066 while ((val & 0x1) == 0 && ++num <= 3)
2070 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2071 inst->operands[0].qualifier =
2072 get_vreg_qualifier_from_value ((num << 1) | Q);
2075 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2077 /* Use Rt to encode in the case of e.g.
2078 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2079 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2082 /* Otherwise use the result operand, which has to be a integer
2084 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2085 == AARCH64_OPND_CLASS_INT_REG);
2088 assert (idx == 0 || idx == 1);
2089 value = extract_field (FLD_Q, inst->value, 0);
2090 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2093 if (inst->opcode->flags & F_LDS_SIZE)
2095 aarch64_field field = {0, 0};
2096 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2097 == AARCH64_OPND_CLASS_INT_REG);
2098 gen_sub_field (FLD_opc, 0, 1, &field);
2099 value = extract_field_2 (&field, inst->value, 0);
2100 inst->operands[0].qualifier
2101 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2104 /* Miscellaneous decoding; done as the last step. */
2105 if (inst->opcode->flags & F_MISC)
2106 return do_misc_decoding (inst);
2111 /* Converters converting a real opcode instruction to its alias form. */
2113 /* ROR <Wd>, <Ws>, #<shift>
2115 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2117 convert_extr_to_ror (aarch64_inst *inst)
2119 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2121 copy_operand_info (inst, 2, 3);
2122 inst->operands[3].type = AARCH64_OPND_NIL;
2128 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2130 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2132 convert_shll_to_xtl (aarch64_inst *inst)
2134 if (inst->operands[2].imm.value == 0)
2136 inst->operands[2].type = AARCH64_OPND_NIL;
2143 UBFM <Xd>, <Xn>, #<shift>, #63.
2145 LSR <Xd>, <Xn>, #<shift>. */
2147 convert_bfm_to_sr (aarch64_inst *inst)
2151 imms = inst->operands[3].imm.value;
2152 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2155 inst->operands[3].type = AARCH64_OPND_NIL;
2162 /* Convert MOV to ORR. */
2164 convert_orr_to_mov (aarch64_inst *inst)
2166 /* MOV <Vd>.<T>, <Vn>.<T>
2168 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2169 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2171 inst->operands[2].type = AARCH64_OPND_NIL;
2177 /* When <imms> >= <immr>, the instruction written:
2178 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2180 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2183 convert_bfm_to_bfx (aarch64_inst *inst)
2187 immr = inst->operands[2].imm.value;
2188 imms = inst->operands[3].imm.value;
2192 inst->operands[2].imm.value = lsb;
2193 inst->operands[3].imm.value = imms + 1 - lsb;
2194 /* The two opcodes have different qualifiers for
2195 the immediate operands; reset to help the checking. */
2196 reset_operand_qualifier (inst, 2);
2197 reset_operand_qualifier (inst, 3);
2204 /* When <imms> < <immr>, the instruction written:
2205 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2207 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2210 convert_bfm_to_bfi (aarch64_inst *inst)
2212 int64_t immr, imms, val;
2214 immr = inst->operands[2].imm.value;
2215 imms = inst->operands[3].imm.value;
2216 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2219 inst->operands[2].imm.value = (val - immr) & (val - 1);
2220 inst->operands[3].imm.value = imms + 1;
2221 /* The two opcodes have different qualifiers for
2222 the immediate operands; reset to help the checking. */
2223 reset_operand_qualifier (inst, 2);
2224 reset_operand_qualifier (inst, 3);
2231 /* The instruction written:
2232 BFC <Xd>, #<lsb>, #<width>
2234 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2237 convert_bfm_to_bfc (aarch64_inst *inst)
2239 int64_t immr, imms, val;
2241 /* Should have been assured by the base opcode value. */
2242 assert (inst->operands[1].reg.regno == 0x1f);
2244 immr = inst->operands[2].imm.value;
2245 imms = inst->operands[3].imm.value;
2246 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2249 /* Drop XZR from the second operand. */
2250 copy_operand_info (inst, 1, 2);
2251 copy_operand_info (inst, 2, 3);
2252 inst->operands[3].type = AARCH64_OPND_NIL;
2254 /* Recalculate the immediates. */
2255 inst->operands[1].imm.value = (val - immr) & (val - 1);
2256 inst->operands[2].imm.value = imms + 1;
2258 /* The two opcodes have different qualifiers for the operands; reset to
2259 help the checking. */
2260 reset_operand_qualifier (inst, 1);
2261 reset_operand_qualifier (inst, 2);
2262 reset_operand_qualifier (inst, 3);
2270 /* The instruction written:
2271 LSL <Xd>, <Xn>, #<shift>
2273 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2276 convert_ubfm_to_lsl (aarch64_inst *inst)
2278 int64_t immr = inst->operands[2].imm.value;
2279 int64_t imms = inst->operands[3].imm.value;
2281 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2283 if ((immr == 0 && imms == val) || immr == imms + 1)
2285 inst->operands[3].type = AARCH64_OPND_NIL;
2286 inst->operands[2].imm.value = val - imms;
2293 /* CINC <Wd>, <Wn>, <cond>
2295 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2296 where <cond> is not AL or NV. */
2299 convert_from_csel (aarch64_inst *inst)
2301 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2302 && (inst->operands[3].cond->value & 0xe) != 0xe)
2304 copy_operand_info (inst, 2, 3);
2305 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2306 inst->operands[3].type = AARCH64_OPND_NIL;
2312 /* CSET <Wd>, <cond>
2314 CSINC <Wd>, WZR, WZR, invert(<cond>)
2315 where <cond> is not AL or NV. */
2318 convert_csinc_to_cset (aarch64_inst *inst)
2320 if (inst->operands[1].reg.regno == 0x1f
2321 && inst->operands[2].reg.regno == 0x1f
2322 && (inst->operands[3].cond->value & 0xe) != 0xe)
2324 copy_operand_info (inst, 1, 3);
2325 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2326 inst->operands[3].type = AARCH64_OPND_NIL;
2327 inst->operands[2].type = AARCH64_OPND_NIL;
2335 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2337 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2338 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2339 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2340 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2341 machine-instruction mnemonic must be used. */
2344 convert_movewide_to_mov (aarch64_inst *inst)
2346 uint64_t value = inst->operands[1].imm.value;
2347 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2348 if (value == 0 && inst->operands[1].shifter.amount != 0)
2350 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2351 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2352 value <<= inst->operands[1].shifter.amount;
2353 /* As an alias convertor, it has to be clear that the INST->OPCODE
2354 is the opcode of the real instruction. */
2355 if (inst->opcode->op == OP_MOVN)
2357 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2359 /* A MOVN has an immediate that could be encoded by MOVZ. */
2360 if (aarch64_wide_constant_p (value, is32, NULL))
2363 inst->operands[1].imm.value = value;
2364 inst->operands[1].shifter.amount = 0;
2370 ORR <Wd>, WZR, #<imm>.
2372 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2373 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2374 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2375 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2376 machine-instruction mnemonic must be used. */
/* Convert ORR <Wd>, WZR, #<imm> (bitmask immediate) to the MOV alias.
   The alias is rejected when the destination is the zero register or when
   the immediate (or its complement) could be produced by MOVZ/MOVN.
   NOTE(review): local declarations and the return paths are elided from
   this view.  */
2379 convert_movebitmask_to_mov (aarch64_inst *inst)
2384 /* Should have been assured by the base opcode value. */
2385 assert (inst->operands[1].reg.regno == 0x1f);
/* Shift the immediate operand down into slot 1 and retype it.  */
2386 copy_operand_info (inst, 1, 2);
2387 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2388 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2389 value = inst->operands[1].imm.value;
2390 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2392 if (inst->operands[0].reg.regno != 0x1f
2393 && (aarch64_wide_constant_p (value, is32, NULL)
2394 || aarch64_wide_constant_p (~value, is32, NULL)))
2397 inst->operands[2].type = AARCH64_OPND_NIL;
2401 /* Some alias opcodes are disassembled by being converted from their real-form.
2402 N.B. INST->OPCODE is the real opcode rather than the alias. */
/* Dispatch on the real opcode's OP enumerator and call the matching
   convert_* routine, which rewrites *INST in place into the ALIAS form.
   Each converter's return value (success/failure of the conversion) is
   propagated to the caller.  NOTE(review): most case labels are elided
   in this view; only a few OP_MOV_* labels remain visible.  */
2405 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2411 return convert_bfm_to_sr (inst);
2413 return convert_ubfm_to_lsl (inst);
2417 return convert_from_csel (inst);
2420 return convert_csinc_to_cset (inst);
2424 return convert_bfm_to_bfx (inst);
2428 return convert_bfm_to_bfi (inst);
2430 return convert_bfm_to_bfc (inst);
2432 return convert_orr_to_mov (inst);
2433 case OP_MOV_IMM_WIDE:
2434 case OP_MOV_IMM_WIDEN:
2435 return convert_movewide_to_mov (inst);
2436 case OP_MOV_IMM_LOG:
2437 return convert_movebitmask_to_mov (inst);
2439 return convert_extr_to_ror (inst);
2444 return convert_shll_to_xtl (inst);
2450 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2451 aarch64_inst *, int);
2453 /* Given the instruction information in *INST, check if the instruction has
2454 any alias form that can be used to represent *INST. If the answer is yes,
2455 update *INST to be in the form of the determined alias. */
2457 /* In the opcode description table, the following flags are used in opcode
2458 entries to help establish the relations between the real and alias opcodes:
2460 F_ALIAS: opcode is an alias
2461 F_HAS_ALIAS: opcode has alias(es)
2464 F_P3: Disassembly preference priority 1-3 (the larger the
2465 higher). If nothing is specified, it is the priority
2466 0 by default, i.e. the lowest priority.
2468 Although the relation between the machine and the alias instructions are not
2469 explicitly described, it can be easily determined from the base opcode
2470 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2471 description entries:
2473 The mask of an alias opcode must be equal to or a super-set (i.e. more
2474 constrained) of that of the aliased opcode; so is the base opcode value.
2476 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2477 && (opcode->mask & real->mask) == real->mask
2478 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2479 then OPCODE is an alias of, and only of, the REAL instruction
2481 The alias relationship is forced flat-structured to keep related algorithm
2482 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2484 During the disassembling, the decoding decision tree (in
2485 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2486 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2487 not specified), the disassembler will check whether any alias
2488 instruction exists for this real instruction. If there is, the disassembler
2489 will try to disassemble the 32-bit binary again using the alias's rule, or
2490 try to convert the IR to the form of the alias. In the case of the multiple
2491 aliases, the aliases are tried one by one from the highest priority
2492 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2493 first that succeeds is adopted.
2495 You may ask why there is a need for the conversion of IR from one form to
2496 another in handling certain aliases. This is because on one hand it avoids
2497 adding more operand code to handle unusual encoding/decoding; on the other
2498 hand, during the disassembling, the conversion is an effective approach to
2499 check the condition of an alias (as an alias may be adopted only if certain
2500 conditions are met).
2502 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2503 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2504 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
/* Walk the alias list of INST->OPCODE (highest priority first) and replace
   *INST with the first alias that applies: pseudo opcodes are skipped, the
   encoded bits must match the alias's base opcode, and F_CONV aliases must
   additionally survive convert_to_alias on a scratch copy.
   Fix: the identifier `&copy` had been mangled into the single glyph
   `©` (an HTML-entity rendering accident), which is not valid C —
   restored on all five affected lines.  */
2507 determine_disassembling_preference (struct aarch64_inst *inst)
2509 const aarch64_opcode *opcode;
2510 const aarch64_opcode *alias;
2512 opcode = inst->opcode;
2514 /* This opcode does not have an alias, so use itself. */
2515 if (!opcode_has_alias (opcode))
2518 alias = aarch64_find_alias_opcode (opcode);
2521 #ifdef DEBUG_AARCH64
2524 const aarch64_opcode *tmp = alias;
2525 printf ("#### LIST orderd: ");
2528 printf ("%s, ", tmp->name);
2529 tmp = aarch64_find_next_alias_opcode (tmp);
2533 #endif /* DEBUG_AARCH64 */
2535 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2537 DEBUG_TRACE ("try %s", alias->name);
2538 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2540 /* An alias can be a pseudo opcode which will never be used in the
2541 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2543 if (pseudo_opcode_p (alias))
2545 DEBUG_TRACE ("skip pseudo %s", alias->name);
/* The instruction bits must match the alias's (more constrained)
   base opcode under the alias's mask.  */
2549 if ((inst->value & alias->mask) != alias->opcode)
2551 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2554 /* No need to do any complicated transformation on operands, if the alias
2555 opcode does not have any operand. */
2556 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2558 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2559 aarch64_replace_opcode (inst, alias);
2562 if (alias->flags & F_CONV)
/* Work on a scratch copy so a failed conversion leaves *INST intact.  */
2565 memcpy (&copy, inst, sizeof (aarch64_inst));
2566 /* ALIAS is the preference as long as the instruction can be
2567 successfully converted to the form of ALIAS. */
2568 if (convert_to_alias (&copy, alias) == 1)
2570 aarch64_replace_opcode (&copy, alias);
2571 assert (aarch64_match_operands_constraint (&copy, NULL));
2572 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2573 memcpy (inst, &copy, sizeof (aarch64_inst));
2579 /* Directly decode the alias opcode. */
2581 memset (&temp, '\0', sizeof (aarch64_inst));
2582 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2584 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2585 memcpy (inst, &temp, sizeof (aarch64_inst));
2592 /* Some instructions (including all SVE ones) use the instruction class
2593 to describe how a qualifiers_list index is represented in the instruction
2594 encoding. If INST is such an instruction, decode the appropriate fields
2595 and fill in the operand qualifiers accordingly. Return true if no
2596 problems are found. */
/* Each iclass case below computes VARIANT, the row index into
   inst->opcode->qualifiers_list, from the instruction-class-specific
   size/tsz fields of the encoding; the final loop copies that row into
   the operand qualifiers.  NOTE(review): the case labels and several
   intermediate statements are elided from this view.  */
2599 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2604 switch (inst->opcode->iclass)
2607 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2611 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
/* Locate the lowest set bit of the tsz field; its position selects the
   element size.  */
2614 while ((i & 1) == 0)
2622 /* Pick the smallest applicable element size. */
2623 if ((inst->value & 0x20600) == 0x600)
2625 else if ((inst->value & 0x20400) == 0x400)
2627 else if ((inst->value & 0x20000) == 0)
2634 /* sve_misc instructions have only a single variant. */
2638 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16)
2642 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2645 case sve_shift_pred:
2646 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2657 case sve_shift_unpred:
2658 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2662 variant = extract_field (FLD_size, inst->value, 0);
2668 variant = extract_field (FLD_size, inst->value, 0);
2672 i = extract_field (FLD_size, inst->value, 0);
2679 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2683 /* No mapping between instruction class and qualifiers. */
/* Apply the selected qualifiers row to every operand slot.  */
2687 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2688 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2691 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2692 fails, which means that CODE is not an instruction of OPCODE; otherwise
2695 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2696 determined and used to disassemble CODE; this is done just before the
/* Decoding pipeline: base-opcode bit match, operand type/index assignment,
   flag-driven special decoder, iclass-based qualifier selection, per-operand
   extractors, optional opcode verifier, then qualifier constraint matching.
   Any stage that fails bails out (early returns elided from this view).  */
2700 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2701 aarch64_inst *inst, int noaliases_p)
2705 DEBUG_TRACE ("enter with %s", opcode->name);
2707 assert (opcode && inst);
2709 /* Check the base opcode. */
2710 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2712 DEBUG_TRACE ("base opcode match FAIL");
/* Start from a clean IR; only then attach the opcode.  */
2717 memset (inst, '\0', sizeof (aarch64_inst));
2719 inst->opcode = opcode;
2722 /* Assign operand codes and indexes. */
2723 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2725 if (opcode->operands[i] == AARCH64_OPND_NIL)
2727 inst->operands[i].type = opcode->operands[i];
2728 inst->operands[i].idx = i;
2731 /* Call the opcode decoder indicated by flags. */
2732 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2734 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2738 /* Possibly use the instruction class to determine the correct
2740 if (!aarch64_decode_variant_using_iclass (inst))
2742 DEBUG_TRACE ("iclass-based decoder FAIL");
2746 /* Call operand decoders. */
2747 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2749 const aarch64_operand *opnd;
2750 enum aarch64_opnd type;
2752 type = opcode->operands[i];
2753 if (type == AARCH64_OPND_NIL)
2755 opnd = &aarch64_operands[type];
2756 if (operand_has_extractor (opnd)
2757 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2759 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2764 /* If the opcode has a verifier, then check it now. */
2765 if (opcode->verifier && ! opcode->verifier (opcode, code))
2767 DEBUG_TRACE ("operand verifier FAIL");
2771 /* Match the qualifiers. */
2772 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2774 /* Arriving here, the CODE has been determined as a valid instruction
2775 of OPCODE and *INST has been filled with information of this OPCODE
2776 instruction. Before the return, check if the instruction has any
2777 alias and should be disassembled in the form of its alias instead.
2778 If the answer is yes, *INST will be updated. */
2780 determine_disassembling_preference (inst);
2781 DEBUG_TRACE ("SUCCESS");
2786 DEBUG_TRACE ("constraint matching FAIL");
2793 /* This does some user-friendly fix-up to *INST. It is currently focused on
2794 the adjustment of qualifiers to help the printed instruction
2795 recognized/understood more easily. */
2798 user_friendly_fixup (aarch64_inst *inst)
2800 switch (inst->opcode->iclass)
2803 /* TBNZ Xn|Wn, #uimm6, label
2804 Test and Branch Not Zero: conditionally jumps to label if bit number
2805 uimm6 in register Xn is not zero. The bit number implies the width of
2806 the register, which may be written and should be disassembled as Wn if
2807 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
/* Bit numbers 0-31 fit in a W register, so print the 32-bit form.  */
2809 if (inst->operands[1].imm.value < 32)
2810 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2816 /* Decode INSN and fill in *INST the instruction information. An alias
2817 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2821 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2822 bfd_boolean noaliases_p)
/* The lookup may return a chain of candidate opcodes; walk it until one
   decodes successfully.  */
2824 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2826 #ifdef DEBUG_AARCH64
2829 const aarch64_opcode *tmp = opcode;
2831 DEBUG_TRACE ("opcode lookup:");
2834 aarch64_verbose (" %s", tmp->name);
2835 tmp = aarch64_find_next_opcode (tmp);
2838 #endif /* DEBUG_AARCH64 */
2840 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2841 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2842 opcode field and value, apart from the difference that one of them has an
2843 extra field as part of the opcode, but such a field is used for operand
2844 encoding in other opcode(s) ('immh' in the case of the example). */
2845 while (opcode != NULL)
2847 /* But only one opcode can be decoded successfully for, as the
2848 decoding routine will check the constraint carefully. */
2849 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2851 opcode = aarch64_find_next_opcode (opcode);
2857 /* Print operands. */
/* Format each operand of OPCODE/OPNDS into STR via aarch64_print_operand,
   emitting "\t" before the first printed operand and ", " before the rest;
   PC-relative operands go through info->print_address_func instead of the
   plain fprintf path.  NOTE(review): the STR declaration and the pcrel
   branch structure are partially elided in this view.  */
2860 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2861 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2863 int i, pcrel_p, num_printed;
2864 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2867 /* We regard the opcode operand info more, however we also look into
2868 the inst->operands to support the disassembling of the optional
2870 The two operand code should be the same in all cases, apart from
2871 when the operand can be optional. */
2872 if (opcode->operands[i] == AARCH64_OPND_NIL
2873 || opnds[i].type == AARCH64_OPND_NIL)
2876 /* Generate the operand string in STR. */
2877 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2880 /* Print the delimiter (taking account of omitted operand(s)). */
2882 (*info->fprintf_func) (info->stream, "%s",
2883 num_printed++ == 0 ? "\t" : ", ");
2885 /* Print the operand. */
2887 (*info->print_address_func) (info->target, info);
2889 (*info->fprintf_func) (info->stream, "%s", str);
2893 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
/* Only valid for mnemonics that actually contain a '.' and for
   conditional instructions (asserted below).  NOTE(review): the line that
   NUL-terminates NAME after the strncpy is elided from this view —
   strncpy alone does not terminate; confirm against the full source.  */
2896 remove_dot_suffix (char *name, const aarch64_inst *inst)
2901 ptr = strchr (inst->opcode->name, '.');
2902 assert (ptr && inst->cond);
2903 len = ptr - inst->opcode->name;
2905 strncpy (name, inst->opcode->name, len);
2909 /* Print the instruction mnemonic name. */
/* For F_COND opcodes print "<base>.<cond>" (e.g. "b.eq") using the first
   name of the decoded condition; otherwise print the opcode name as-is.  */
2912 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2914 if (inst->opcode->flags & F_COND)
2916 /* For instructions that are truly conditionally executed, e.g. b.cond,
2917 prepare the full mnemonic name with the corresponding condition
2921 remove_dot_suffix (name, inst);
2922 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2925 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2928 /* Decide whether we need to print a comment after the operands of
2929 instruction INST. */
/* For conditional instructions whose condition has alternative spellings,
   append a " // <name>.<alt>, ..." comment listing each alternative.  */
2932 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2934 if (inst->opcode->flags & F_COND)
2937 unsigned int i, num_conds;
2939 remove_dot_suffix (name, inst);
2940 num_conds = ARRAY_SIZE (inst->cond->names);
/* Start at index 1: names[0] was already printed as the mnemonic suffix.  */
2941 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2942 (*info->fprintf_func) (info->stream, "%s %s.%s",
2943 i == 1 ? " //" : ",",
2944 name, inst->cond->names[i]);
2948 /* Print the instruction according to *INST. */
/* Composite printer: mnemonic, then operands, then any trailing
   condition-alias comment.  */
2951 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2952 struct disassemble_info *info)
2954 print_mnemonic_name (inst, info);
2955 print_operands (pc, inst->opcode, inst->operands, info);
2956 print_comment (inst, info);
2959 /* Entry-point of the instruction disassembler and printer. */
/* Decode one 32-bit WORD at PC and print it.  On decode failure the word
   is emitted as ".inst 0x%08x ; <reason>" using the err_msg table indexed
   by the negated error code.  NOTE(review): error-code setup and several
   control-flow lines are elided from this view.  */
2962 print_insn_aarch64_word (bfd_vma pc,
2964 struct disassemble_info *info)
2966 static const char *err_msg[6] =
2969 [-ERR_UND] = "undefined",
2970 [-ERR_UNP] = "unpredictable",
2977 info->insn_info_valid = 1;
2978 info->branch_delay_insns = 0;
2979 info->data_size = 0;
2983 if (info->flags & INSN_HAS_RELOC)
2984 /* If the instruction has a reloc associated with it, then
2985 the offset field in the instruction will actually be the
2986 addend for the reloc. (If we are using REL type relocs).
2987 In such cases, we can ignore the pc when computing
2988 addresses, since the addend is not currently pc-relative. */
2991 ret = aarch64_decode_insn (word, &inst, no_aliases);
/* Bits [30:21] == 1 is in the encoding space RESERVED for ALES and must
   never have decoded successfully.  */
2993 if (((word >> 21) & 0x3ff) == 1)
2995 /* RESERVED for ALES. */
2996 assert (ret != ERR_OK);
3005 /* Handle undefined instructions. */
3006 info->insn_type = dis_noninsn;
3007 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3008 word, err_msg[-ret]);
3011 user_friendly_fixup (&inst);
3012 print_aarch64_insn (pc, &inst, info);
3019 /* Disallow mapping symbols ($x, $d etc) from
3020 being displayed in symbol relative addresses. */
/* Returns FALSE for AArch64 ELF mapping symbols: "$x" / "$d", optionally
   followed by a '.' suffix.  All other symbols are considered valid.  */
3023 aarch64_symbol_is_valid (asymbol * sym,
3024 struct disassemble_info * info ATTRIBUTE_UNUSED)
3031 name = bfd_asymbol_name (sym);
3035 || (name[1] != 'x' && name[1] != 'd')
3036 || (name[2] != '\0' && name[2] != '.'));
3039 /* Print data bytes on INFO->STREAM. */
/* Emit WORD as a .byte/.short/.word directive according to the chunk size
   chosen by the caller (info->bytes_per_chunk of 1, 2 or 4).  */
3042 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3044 struct disassemble_info *info)
3046 switch (info->bytes_per_chunk)
3049 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3052 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3055 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3062 /* Try to infer the code or data type from a symbol.
3063 Returns nonzero if *MAP_TYPE was set. */
3066 get_sym_code_type (struct disassemble_info *info, int n,
3067 enum map_type *map_type)
3069 elf_symbol_type *es;
/* info->symtab holds asymbol pointers; for ELF these are really
   elf_symbol_type, giving access to the raw st_info field.  */
3073 es = *(elf_symbol_type **)(info->symtab + n);
3074 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3076 /* If the symbol has function type then use that. */
3077 if (type == STT_FUNC)
3079 *map_type = MAP_INSN;
3083 /* Check for mapping symbols. */
3084 name = bfd_asymbol_name(info->symtab[n]);
/* "$x[.…]" marks code, "$d[.…]" marks data (AAELF64 mapping symbols).  */
3086 && (name[1] == 'x' || name[1] == 'd')
3087 && (name[2] == '\0' || name[2] == '.'))
3089 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3096 /* Entry-point of the AArch64 disassembler. */
/* Top-level objdump hook: parse -M options once, use ELF mapping symbols
   (cached in last_type/last_mapping_sym/last_mapping_addr) to decide
   whether PC is code or data, pick the matching printer and chunk size,
   read the bytes, and dispatch.  NOTE(review): loop bodies and the final
   return are partially elided from this view.  */
3099 print_insn_aarch64 (bfd_vma pc,
3100 struct disassemble_info *info)
3102 bfd_byte buffer[INSNLEN];
3104 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3105 bfd_boolean found = FALSE;
3106 unsigned int size = 4;
3109 if (info->disassembler_options)
3111 set_default_aarch64_dis_options (info);
3113 parse_aarch64_dis_options (info->disassembler_options);
3115 /* To avoid repeated parsing of these options, we remove them here. */
3116 info->disassembler_options = NULL;
3119 /* Aarch64 instructions are always little-endian */
3120 info->endian_code = BFD_ENDIAN_LITTLE;
3122 /* First check the full symtab for a mapping symbol, even if there
3123 are no usable non-mapping symbols for this address. */
3124 if (info->symtab_size != 0
3125 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3127 enum map_type type = MAP_INSN;
/* Moving backwards invalidates the forward-scan cache.  */
3132 if (pc <= last_mapping_addr)
3133 last_mapping_sym = -1;
3135 /* Start scanning at the start of the function, or wherever
3136 we finished last time. */
3137 n = info->symtab_pos + 1;
3138 if (n < last_mapping_sym)
3139 n = last_mapping_sym;
3141 /* Scan up to the location being disassembled. */
3142 for (; n < info->symtab_size; n++)
3144 addr = bfd_asymbol_value (info->symtab[n]);
3147 if ((info->section == NULL
3148 || info->section == info->symtab[n]->section)
3149 && get_sym_code_type (info, n, &type))
3158 n = info->symtab_pos;
3159 if (n < last_mapping_sym)
3160 n = last_mapping_sym;
3162 /* No mapping symbol found at this address. Look backwards
3163 for a preceding one. */
3166 if (get_sym_code_type (info, n, &type))
3175 last_mapping_sym = last_sym;
3178 /* Look a little bit ahead to see if we should print out
3179 less than four bytes of data. If there's a symbol,
3180 mapping or otherwise, after two bytes then don't
3182 if (last_type == MAP_DATA)
3184 size = 4 - (pc & 3);
3185 for (n = last_sym + 1; n < info->symtab_size; n++)
3187 addr = bfd_asymbol_value (info->symtab[n]);
3190 if (addr - pc < size)
3195 /* If the next symbol is after three bytes, we need to
3196 print only part of the data, so that we can use either
3199 size = (pc & 1) ? 1 : 2;
3203 if (last_type == MAP_DATA)
3205 /* size was set above. */
3206 info->bytes_per_chunk = size;
3207 info->display_endian = info->endian;
3208 printer = print_insn_data;
3212 info->bytes_per_chunk = size = INSNLEN;
3213 info->display_endian = info->endian_code;
3214 printer = print_insn_aarch64_word;
3217 status = (*info->read_memory_func) (pc, buffer, size, info);
3220 (*info->memory_error_func) (status, pc, info);
3224 data = bfd_get_bits (buffer, size * 8,
3225 info->display_endian == BFD_ENDIAN_BIG);
3227 (*printer) (pc, data, info);
3233 print_aarch64_disassembler_options (FILE *stream)
3235 fprintf (stream, _("\n\
3236 The following AARCH64 specific disassembler options are supported for use\n\
3237 with the -M switch (multiple options should be separated by commas):\n"));
3239 fprintf (stream, _("\n\
3240 no-aliases Don't print instruction aliases.\n"));
3242 fprintf (stream, _("\n\
3243 aliases Do print instruction aliases.\n"));
3245 #ifdef DEBUG_AARCH64
3246 fprintf (stream, _("\n\
3247 debug_dump Temp switch for debug trace.\n"));
3248 #endif /* DEBUG_AARCH64 */
3250 fprintf (stream, _("\n"));