1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
66 if (CONST_STRNEQ (option, "aliases"))
73 if (CONST_STRNEQ (option, "debug_dump"))
78 #endif /* DEBUG_AARCH64 */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
85 parse_aarch64_dis_options (const char *options)
87 const char *option_end;
92 while (*options != '\0')
94 /* Skip empty options. */
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
106 parse_aarch64_dis_option (options, option_end - options);
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
114 /* Functions doing the instruction disassembling. */
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M; in such cases, the fields H:L:M should be passed in
124 the order of H, L, M. */
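/* Illustrative usage (not part of the original source): the H:L:M index of
   the SQDMLAL example above would be collected with a call along the lines of
     index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
   mirroring the FLD_H, FLD_L, FLD_M call that appears in aarch64_ext_reglane
   further below. */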
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
135 num = va_arg (va, uint32_t);
137 aarch64_insn value = 0x0;
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
156 enum aarch64_field_kind kind;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
168 /* Sign-extend bit I of VALUE. */
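/* For example (illustrative): sign_extend (0x1f0, 8) treats bit 8 as the
   sign bit, ORs in 0xffffff00 and returns -16 (0xfffffff0). */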
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
172 uint32_t ret = value;
175 if ((value >> i) & 0x1)
177 uint32_t val = (uint32_t)(-1) << i;
180 return (int32_t) ret;
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
204 /* Instructions using vector type 2H should not call this function. Skip over
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
225 /* Given the instruction in *INST, which is probably halfway through the
226 decoding, work out the qualifier that the caller expects for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
238 return qualifiers[i];
240 return AARCH64_OPND_QLF_NIL;
243 /* Operand extractors. */
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
250 info->reg.regno = extract_field (self->fields[0], code, 0);
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
259 assert (info->idx == 1
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
265 /* e.g. IC <ic_op>{, <Xt>}. */
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
290 info->reglane.regno = extract_field (self->fields[0], code,
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
337 case AARCH64_OPND_QLF_S_H:
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
341 info->reglane.regno &= 0xf;
343 case AARCH64_OPND_QLF_S_S:
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
347 case AARCH64_OPND_QLF_S_D:
349 info->reglane.index = extract_field (FLD_H, code, 0);
360 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
361 const aarch64_insn code,
362 const aarch64_inst *inst ATTRIBUTE_UNUSED)
365 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
367 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
371 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
373 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
374 aarch64_opnd_info *info, const aarch64_insn code,
375 const aarch64_inst *inst)
378 /* Number of elements in each structure to be loaded/stored. */
379 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
383 unsigned is_reserved;
385 unsigned num_elements;
401 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
403 value = extract_field (FLD_opcode, code, 0);
404 if (expected_num != data[value].num_elements || data[value].is_reserved)
406 info->reglist.num_regs = data[value].num_regs;
411 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
412 lanes instructions. */
414 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
415 aarch64_opnd_info *info, const aarch64_insn code,
416 const aarch64_inst *inst)
421 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
423 value = extract_field (FLD_S, code, 0);
425 /* Number of registers is equal to the number of elements in
426 each structure to be loaded/stored. */
427 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
428 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
430 /* Except when it is LD1R. */
431 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
432 info->reglist.num_regs = 2;
437 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
438 load/store single element instructions. */
440 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst ATTRIBUTE_UNUSED)
444 aarch64_field field = {0, 0};
445 aarch64_insn QSsize; /* fields Q:S:size. */
446 aarch64_insn opcodeh2; /* opcode<2:1> */
449 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
451 /* Decode the index, opcode<2:1> and size. */
452 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
453 opcodeh2 = extract_field_2 (&field, code, 0);
454 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
458 info->qualifier = AARCH64_OPND_QLF_S_B;
459 /* Index encoded in "Q:S:size". */
460 info->reglist.index = QSsize;
466 info->qualifier = AARCH64_OPND_QLF_S_H;
467 /* Index encoded in "Q:S:size<1>". */
468 info->reglist.index = QSsize >> 1;
471 if ((QSsize >> 1) & 0x1)
474 if ((QSsize & 0x1) == 0)
476 info->qualifier = AARCH64_OPND_QLF_S_S;
477 /* Index encoded in "Q:S". */
478 info->reglist.index = QSsize >> 2;
482 if (extract_field (FLD_S, code, 0))
485 info->qualifier = AARCH64_OPND_QLF_S_D;
486 /* Index encoded in "Q". */
487 info->reglist.index = QSsize >> 3;
494 info->reglist.has_index = 1;
495 info->reglist.num_regs = 0;
496 /* Number of registers is equal to the number of elements in
497 each structure to be loaded/stored. */
498 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
499 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
504 /* Decode fields immh:immb and/or Q for e.g.
505 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
506 or SSHR <V><d>, <V><n>, #<shift>. */
509 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 aarch64_opnd_info *info, const aarch64_insn code,
511 const aarch64_inst *inst)
514 aarch64_insn Q, imm, immh;
515 enum aarch64_insn_class iclass = inst->opcode->iclass;
517 immh = extract_field (FLD_immh, code, 0);
520 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
522 /* Get highest set bit in immh. */
523 while (--pos >= 0 && (immh & 0x8) == 0)
526 assert ((iclass == asimdshf || iclass == asisdshf)
527 && (info->type == AARCH64_OPND_IMM_VLSR
528 || info->type == AARCH64_OPND_IMM_VLSL));
530 if (iclass == asimdshf)
532 Q = extract_field (FLD_Q, code, 0);
534 0000 x SEE AdvSIMD modified immediate
544 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
547 info->qualifier = get_sreg_qualifier_from_value (pos);
549 if (info->type == AARCH64_OPND_IMM_VLSR)
551 0000 SEE AdvSIMD modified immediate
552 0001 (16-UInt(immh:immb))
553 001x (32-UInt(immh:immb))
554 01xx (64-UInt(immh:immb))
555 1xxx (128-UInt(immh:immb)) */
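/* Illustrative example (not in the original source): immh:immb == 0b0010100
   has the highest immh bit at position 1, so pos == 1 and the decoded
   right-shift amount is (16 << 1) - 20 == 12. */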
556 info->imm.value = (16 << pos) - imm;
560 0000 SEE AdvSIMD modified immediate
561 0001 (UInt(immh:immb)-8)
562 001x (UInt(immh:immb)-16)
563 01xx (UInt(immh:immb)-32)
564 1xxx (UInt(immh:immb)-64) */
565 info->imm.value = imm - (8 << pos);
570 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
572 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
573 aarch64_opnd_info *info, const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED)
578 val = extract_field (FLD_size, code, 0);
581 case 0: imm = 8; break;
582 case 1: imm = 16; break;
583 case 2: imm = 32; break;
586 info->imm.value = imm;
590 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
591 The value in the field(s) will be extracted as an unsigned immediate value. */
593 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
594 const aarch64_insn code,
595 const aarch64_inst *inst ATTRIBUTE_UNUSED)
599 imm = extract_all_fields (self, code);
601 if (operand_need_sign_extension (self))
602 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
604 if (operand_need_shift_by_two (self))
607 if (info->type == AARCH64_OPND_ADDR_ADRP)
610 info->imm.value = imm;
614 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
616 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
617 const aarch64_insn code,
618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
620 aarch64_ext_imm (self, info, code, inst);
621 info->shifter.kind = AARCH64_MOD_LSL;
622 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
626 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
627 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
629 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
630 aarch64_opnd_info *info,
631 const aarch64_insn code,
632 const aarch64_inst *inst ATTRIBUTE_UNUSED)
635 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
636 aarch64_field field = {0, 0};
638 assert (info->idx == 1);
640 if (info->type == AARCH64_OPND_SIMD_FPIMM)
643 /* a:b:c:d:e:f:g:h */
644 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
645 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
647 /* Either MOVI <Dd>, #<imm>
648 or MOVI <Vd>.2D, #<imm>.
649 <imm> is a 64-bit immediate
650 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
651 encoded in "a:b:c:d:e:f:g:h". */
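/* For instance (illustrative): abcdefgh == 0b10000001 expands to the 64-bit
   immediate 0xff000000000000ff, since each set bit selects a byte of 0xff. */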
653 unsigned abcdefgh = imm;
654 for (imm = 0ull, i = 0; i < 8; i++)
655 if (((abcdefgh >> i) & 0x1) != 0)
656 imm |= 0xffull << (8 * i);
658 info->imm.value = imm;
661 info->qualifier = get_expected_qualifier (inst, info->idx);
662 switch (info->qualifier)
664 case AARCH64_OPND_QLF_NIL:
666 info->shifter.kind = AARCH64_MOD_NONE;
668 case AARCH64_OPND_QLF_LSL:
670 info->shifter.kind = AARCH64_MOD_LSL;
671 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
673 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
674 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
675 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
676 default: assert (0); return 0;
678 /* 00: 0; 01: 8; 10:16; 11:24. */
679 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
681 case AARCH64_OPND_QLF_MSL:
683 info->shifter.kind = AARCH64_MOD_MSL;
684 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
685 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
695 /* Decode an 8-bit floating-point immediate. */
697 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
701 info->imm.value = extract_all_fields (self, code);
706 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
708 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 aarch64_opnd_info *info, const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
712 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
716 /* Decode arithmetic immediate for e.g.
717 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
719 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
720 aarch64_opnd_info *info, const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
725 info->shifter.kind = AARCH64_MOD_LSL;
727 value = extract_field (FLD_shift, code, 0);
730 info->shifter.amount = value ? 12 : 0;
731 /* imm12 (unsigned) */
732 info->imm.value = extract_field (FLD_imm12, code, 0);
737 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
740 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
741 aarch64_opnd_info *info, const aarch64_insn code,
742 const aarch64_inst *inst ATTRIBUTE_UNUSED)
750 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
751 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
752 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
753 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
755 /* value is N:immr:imms. */
757 R = (value >> 6) & 0x3f;
758 N = (value >> 12) & 0x1;
760 if (sf == 0 && N == 1)
763 /* The immediate value is S+1 consecutive bits set to 1, left rotated by
764 SIMDsize - R (in other words, right rotated by R), then replicated. */
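/* Worked example (illustrative, not from the original source): with
   N:immr:imms == 1:000000:000000 for a 64-bit operand, simd_size is 64,
   S == 0 and R == 0, so imm is a single set bit and the decoded value
   is #0x1. */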
768 mask = 0xffffffffffffffffull;
774 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
775 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
776 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
777 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
778 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
781 mask = (1ull << simd_size) - 1;
782 /* Top bits are IGNORED. */
785 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
786 if (S == simd_size - 1)
788 /* S+1 consecutive bits to 1. */
789 /* NOTE: S can't be 63 due to detection above. */
790 imm = (1ull << (S + 1)) - 1;
791 /* Rotate to the left by simd_size - R. */
793 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
794 /* Replicate the value according to SIMD size. */
797 case 2: imm = (imm << 2) | imm;
798 case 4: imm = (imm << 4) | imm;
799 case 8: imm = (imm << 8) | imm;
800 case 16: imm = (imm << 16) | imm;
801 case 32: imm = (imm << 32) | imm;
803 default: assert (0); return 0;
806 info->imm.value = sf ? imm : imm & 0xffffffff;
811 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
812 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
814 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
815 aarch64_opnd_info *info,
816 const aarch64_insn code, const aarch64_inst *inst)
821 info->reg.regno = extract_field (FLD_Rt, code, 0);
824 value = extract_field (FLD_ldst_size, code, 0);
825 if (inst->opcode->iclass == ldstpair_indexed
826 || inst->opcode->iclass == ldstnapair_offs
827 || inst->opcode->iclass == ldstpair_off
828 || inst->opcode->iclass == loadlit)
830 enum aarch64_opnd_qualifier qualifier;
833 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
834 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
835 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
838 info->qualifier = qualifier;
843 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
846 info->qualifier = get_sreg_qualifier_from_value (value);
852 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
854 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
855 aarch64_opnd_info *info,
857 const aarch64_inst *inst ATTRIBUTE_UNUSED)
860 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
864 /* Decode the address operand for e.g.
865 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
867 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
868 aarch64_opnd_info *info,
869 aarch64_insn code, const aarch64_inst *inst)
871 aarch64_insn S, value;
874 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
876 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
878 value = extract_field (FLD_option, code, 0);
880 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
881 /* Fix-up the shifter kind; although the table-driven approach is
882 efficient, it is slightly inflexible, thus needing this fix-up. */
883 if (info->shifter.kind == AARCH64_MOD_UXTX)
884 info->shifter.kind = AARCH64_MOD_LSL;
886 S = extract_field (FLD_S, code, 0);
889 info->shifter.amount = 0;
890 info->shifter.amount_present = 0;
895 /* Need information in other operand(s) to help achieve the decoding
897 info->qualifier = get_expected_qualifier (inst, info->idx);
898 /* Get the size of the data element that is accessed, which may be
899 different from the size of the source register, e.g. in strb/ldrb. */
900 size = aarch64_get_qualifier_esize (info->qualifier);
901 info->shifter.amount = get_logsz (size);
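/* For example (illustrative): for LDR <Qt>, [<Xn|SP>, <Xm>, LSL #4] the
   element size is 16 bytes, so the extracted amount is get_logsz (16) == 4. */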
902 info->shifter.amount_present = 1;
908 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
910 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
911 aarch64_insn code, const aarch64_inst *inst)
914 info->qualifier = get_expected_qualifier (inst, info->idx);
917 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
918 /* simm (imm9 or imm7) */
919 imm = extract_field (self->fields[0], code, 0);
920 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
921 if (self->fields[0] == FLD_imm7)
922 /* scaled immediate in ld/st pair instructions. */
923 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
925 if (inst->opcode->iclass == ldst_unscaled
926 || inst->opcode->iclass == ldstnapair_offs
927 || inst->opcode->iclass == ldstpair_off
928 || inst->opcode->iclass == ldst_unpriv)
929 info->addr.writeback = 0;
932 /* pre/post- index */
933 info->addr.writeback = 1;
934 if (extract_field (self->fields[1], code, 0) == 1)
935 info->addr.preind = 1;
937 info->addr.postind = 1;
943 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
945 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
947 const aarch64_inst *inst ATTRIBUTE_UNUSED)
950 info->qualifier = get_expected_qualifier (inst, info->idx);
951 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
953 info->addr.base_regno = extract_field (self->fields[0], code, 0);
955 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
959 /* Decode the address operand for e.g.
960 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
962 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
963 aarch64_opnd_info *info,
964 aarch64_insn code, const aarch64_inst *inst)
966 /* The opcode dependent area stores the number of elements in
967 each structure to be loaded/stored. */
968 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
971 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
973 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
974 if (info->addr.offset.regno == 31)
976 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
977 /* Special handling of loading a single structure to all lanes. */
978 info->addr.offset.imm = (is_ld1r ? 1
979 : inst->operands[0].reglist.num_regs)
980 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
982 info->addr.offset.imm = inst->operands[0].reglist.num_regs
983 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
984 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
987 info->addr.offset.is_reg = 1;
988 info->addr.writeback = 1;
993 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
995 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
996 aarch64_opnd_info *info,
997 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1001 value = extract_field (FLD_cond, code, 0);
1002 info->cond = get_cond_from_value (value);
1006 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1008 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 aarch64_opnd_info *info,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1013 /* op0:op1:CRn:CRm:op2 */
1014 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1019 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1021 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1022 aarch64_opnd_info *info, aarch64_insn code,
1023 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1027 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1028 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1029 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1031 /* Reserved value in <pstatefield>. */
1035 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1037 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1038 aarch64_opnd_info *info,
1040 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1044 const aarch64_sys_ins_reg *sysins_ops;
1045 /* op0:op1:CRn:CRm:op2 */
1046 value = extract_fields (code, 0, 5,
1047 FLD_op0, FLD_op1, FLD_CRn,
1052 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1053 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1054 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1055 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1056 default: assert (0); return 0;
1059 for (i = 0; sysins_ops[i].name != NULL; ++i)
1060 if (sysins_ops[i].value == value)
1062 info->sysins_op = sysins_ops + i;
1063 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1064 info->sysins_op->name,
1065 (unsigned)info->sysins_op->value,
1066 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1073 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1076 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1077 aarch64_opnd_info *info,
1079 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1082 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1086 /* Decode the prefetch operation option operand for e.g.
1087 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1090 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1091 aarch64_opnd_info *info,
1092 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1095 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1099 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1100 to the matching name/value pair in aarch64_hint_options. */
1103 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1104 aarch64_opnd_info *info,
1106 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1109 unsigned hint_number;
1112 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1114 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1116 if (hint_number == aarch64_hint_options[i].value)
1118 info->hint_option = &(aarch64_hint_options[i]);
1126 /* Decode the extended register operand for e.g.
1127 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1129 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1130 aarch64_opnd_info *info,
1132 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1137 info->reg.regno = extract_field (FLD_Rm, code, 0);
1139 value = extract_field (FLD_option, code, 0);
1140 info->shifter.kind =
1141 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1143 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1145 /* This makes the constraint checking happy. */
1146 info->shifter.operator_present = 1;
1148 /* Assume inst->operands[0].qualifier has been resolved. */
1149 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1150 info->qualifier = AARCH64_OPND_QLF_W;
1151 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1152 && (info->shifter.kind == AARCH64_MOD_UXTX
1153 || info->shifter.kind == AARCH64_MOD_SXTX))
1154 info->qualifier = AARCH64_OPND_QLF_X;
1159 /* Decode the shifted register operand for e.g.
1160 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1162 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1163 aarch64_opnd_info *info,
1165 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1170 info->reg.regno = extract_field (FLD_Rm, code, 0);
1172 value = extract_field (FLD_shift, code, 0);
1173 info->shifter.kind =
1174 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1175 if (info->shifter.kind == AARCH64_MOD_ROR
1176 && inst->opcode->iclass != log_shift)
1177 /* ROR is not available for the shifted register operand in arithmetic instructions. */
1181 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1183 /* This makes the constraint checking happy. */
1184 info->shifter.operator_present = 1;
1189 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1190 array specifies which field to use for Zn. MM is encoded in the
1191 concatenation of imm5 and SVE_tszh, with imm5 being the less
1192 significant part. */
1194 aarch64_ext_sve_index (const aarch64_operand *self,
1195 aarch64_opnd_info *info, aarch64_insn code,
1196 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1200 info->reglane.regno = extract_field (self->fields[0], code, 0);
1201 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1202 if ((val & 15) == 0)
1204 while ((val & 1) == 0)
1206 info->reglane.index = val / 2;
1210 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1211 to use for Zn. The opcode-dependent value specifies the number
1212 of registers in the list. */
1214 aarch64_ext_sve_reglist (const aarch64_operand *self,
1215 aarch64_opnd_info *info, aarch64_insn code,
1216 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1218 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1219 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1223 /* Bitfields that are commonly used to encode certain operands' information
1224 may be partially used as part of the base opcode in some instructions.
1225 For example, the bit 1 of the field 'size' in
1226 FCVTXN <Vb><d>, <Va><n>
1227 is actually part of the base opcode, while only size<0> is available
1228 for encoding the register type. Another example is the AdvSIMD
1229 instruction ORR (register), in which the field 'size' is also used for
1230 the base opcode, leaving only the field 'Q' available to encode the
1231 vector register arrangement specifier '8B' or '16B'.
1233 This function tries to deduce the qualifier from the value of partially
1234 constrained field(s). Given the VALUE of such a field or fields, the
1235 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1236 operand encoding), the function returns the matching qualifier or
1237 AARCH64_OPND_QLF_NIL if nothing matches.
1239 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1240 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1241 may end with AARCH64_OPND_QLF_NIL. */
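/* For example (illustrative): for the AdvSIMD ORR (register) case described
   above, only the Q bit is free, so a VALUE of 0 or 1 under a MASK covering
   just Q selects the 8B or 16B qualifier from CANDIDATES respectively. */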
1243 static enum aarch64_opnd_qualifier
1244 get_qualifier_from_partial_encoding (aarch64_insn value,
1245 const enum aarch64_opnd_qualifier* \
1250 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1251 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1253 aarch64_insn standard_value;
1254 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1256 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1257 if ((standard_value & mask) == (value & mask))
1258 return candidates[i];
1260 return AARCH64_OPND_QLF_NIL;
1263 /* Given a list of qualifier sequences, return all possible valid qualifiers
1264 for operand IDX in QUALIFIERS.
1265 Assume QUALIFIERS is an array whose length is large enough. */
1268 get_operand_possible_qualifiers (int idx,
1269 const aarch64_opnd_qualifier_seq_t *list,
1270 enum aarch64_opnd_qualifier *qualifiers)
1273 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1274 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1278 /* Decode the size:Q fields for e.g. SHADD.
1279 We tag one operand with the qualifier according to the code;
1280 whether the qualifier is valid for this opcode or not is the
1281 duty of the semantic checking. */
1284 decode_sizeq (aarch64_inst *inst)
1287 enum aarch64_opnd_qualifier qualifier;
1289 aarch64_insn value, mask;
1290 enum aarch64_field_kind fld_sz;
1291 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1293 if (inst->opcode->iclass == asisdlse
1294 || inst->opcode->iclass == asisdlsep
1295 || inst->opcode->iclass == asisdlso
1296 || inst->opcode->iclass == asisdlsop)
1297 fld_sz = FLD_vldst_size;
1302 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1303 /* Obtain the info about which bits of the fields Q and size are actually
1304 available for operand encoding. Opcodes like FMAXNM and FMLA have
1305 size[1] unavailable. */
1306 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1308 /* The index of the operand that we are going to tag with a qualifier, and
1309 the qualifier itself, are deduced from the value of the size and Q fields
1310 and the possible valid qualifier lists. */
1311 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1312 DEBUG_TRACE ("key idx: %d", idx);
1314 /* For most related instructions, size:Q are fully available for operand
1318 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1322 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1324 #ifdef DEBUG_AARCH64
1328 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1329 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1330 DEBUG_TRACE ("qualifier %d: %s", i,
1331 aarch64_get_qualifier_name(candidates[i]));
1332 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1334 #endif /* DEBUG_AARCH64 */
1336 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1338 if (qualifier == AARCH64_OPND_QLF_NIL)
1341 inst->operands[idx].qualifier = qualifier;
1345 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1346 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1349 decode_asimd_fcvt (aarch64_inst *inst)
1351 aarch64_field field = {0, 0};
1353 enum aarch64_opnd_qualifier qualifier;
1355 gen_sub_field (FLD_size, 0, 1, &field);
1356 value = extract_field_2 (&field, inst->value, 0);
1357 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1358 : AARCH64_OPND_QLF_V_2D;
1359 switch (inst->opcode->op)
1363 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1364 inst->operands[1].qualifier = qualifier;
1368 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1369 inst->operands[0].qualifier = qualifier;
1379 /* Decode size[0], i.e. bit 22, for
1380 e.g. FCVTXN <Vb><d>, <Va><n>. */
1383 decode_asisd_fcvtxn (aarch64_inst *inst)
1385 aarch64_field field = {0, 0};
1386 gen_sub_field (FLD_size, 0, 1, &field);
1387 if (!extract_field_2 (&field, inst->value, 0))
1389 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1393 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1395 decode_fcvt (aarch64_inst *inst)
1397 enum aarch64_opnd_qualifier qualifier;
1399 const aarch64_field field = {15, 2};
1402 value = extract_field_2 (&field, inst->value, 0);
1405 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1406 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1407 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1410 inst->operands[0].qualifier = qualifier;
1415 /* Do miscellaneous decodings that are not common enough to be driven by
1419 do_misc_decoding (aarch64_inst *inst)
1421 switch (inst->opcode->op)
1424 return decode_fcvt (inst);
1429 return decode_asimd_fcvt (inst);
1431 return decode_asisd_fcvtxn (inst);
1437 /* Opcodes that have fields shared by multiple operands are usually flagged
1438 with dedicated flags. In this function, we detect such flags, decode the
1439 related field(s) and store the information in one of the related operands.
1440 The 'one' operand is not an arbitrary operand but one of the operands that
1441 can accommodate all the information that has been decoded. */
1444 do_special_decoding (aarch64_inst *inst)
1448 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1449 if (inst->opcode->flags & F_COND)
1451 value = extract_field (FLD_cond2, inst->value, 0);
1452 inst->cond = get_cond_from_value (value);
1455 if (inst->opcode->flags & F_SF)
1457 idx = select_operand_for_sf_field_coding (inst->opcode);
1458 value = extract_field (FLD_sf, inst->value, 0);
1459 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1460 if ((inst->opcode->flags & F_N)
1461 && extract_field (FLD_N, inst->value, 0) != value)
1465 if (inst->opcode->flags & F_LSE_SZ)
1467 idx = select_operand_for_sf_field_coding (inst->opcode);
1468 value = extract_field (FLD_lse_sz, inst->value, 0);
1469 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1471 /* size:Q fields. */
1472 if (inst->opcode->flags & F_SIZEQ)
1473 return decode_sizeq (inst);
1475 if (inst->opcode->flags & F_FPTYPE)
1477 idx = select_operand_for_fptype_field_coding (inst->opcode);
1478 value = extract_field (FLD_type, inst->value, 0);
1481 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1482 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1483 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1488 if (inst->opcode->flags & F_SSIZE)
1490 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1491 of the base opcode. */
1493 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1494 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1495 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1496 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1497 /* For most related instructions, the 'size' field is fully available for
1498 operand encoding. */
1500 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1503 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1505 inst->operands[idx].qualifier
1506 = get_qualifier_from_partial_encoding (value, candidates, mask);
1510 if (inst->opcode->flags & F_T)
1512 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1515 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1516 == AARCH64_OPND_CLASS_SIMD_REG);
1527 val = extract_field (FLD_imm5, inst->value, 0);
1528 while ((val & 0x1) == 0 && ++num <= 3)
1532 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1533 inst->operands[0].qualifier =
1534 get_vreg_qualifier_from_value ((num << 1) | Q);
1537 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1539 /* Use Rt to encode in the case of e.g.
1540 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1541 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1544 /* Otherwise use the result operand, which has to be an integer register. */
1546 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1547 == AARCH64_OPND_CLASS_INT_REG);
1550 assert (idx == 0 || idx == 1);
1551 value = extract_field (FLD_Q, inst->value, 0);
1552 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1555 if (inst->opcode->flags & F_LDS_SIZE)
1557 aarch64_field field = {0, 0};
1558 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1559 == AARCH64_OPND_CLASS_INT_REG);
1560 gen_sub_field (FLD_opc, 0, 1, &field);
1561 value = extract_field_2 (&field, inst->value, 0);
1562 inst->operands[0].qualifier
1563 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1566 /* Miscellaneous decoding; done as the last step. */
1567 if (inst->opcode->flags & F_MISC)
1568 return do_misc_decoding (inst);
1573 /* Converters that turn a real opcode instruction into its alias form. */
1575 /* ROR <Wd>, <Ws>, #<shift>
1577 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1579 convert_extr_to_ror (aarch64_inst *inst)
1581 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1583 copy_operand_info (inst, 2, 3);
1584 inst->operands[3].type = AARCH64_OPND_NIL;
1590 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1592 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1594 convert_shll_to_xtl (aarch64_inst *inst)
1596 if (inst->operands[2].imm.value == 0)
1598 inst->operands[2].type = AARCH64_OPND_NIL;
1605 UBFM <Xd>, <Xn>, #<shift>, #63.
1607 LSR <Xd>, <Xn>, #<shift>. */
1609 convert_bfm_to_sr (aarch64_inst *inst)
1613 imms = inst->operands[3].imm.value;
1614 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1617 inst->operands[3].type = AARCH64_OPND_NIL;
1624 /* Convert ORR to MOV. */
1626 convert_orr_to_mov (aarch64_inst *inst)
1628 /* MOV <Vd>.<T>, <Vn>.<T>
1630 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1631 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1633 inst->operands[2].type = AARCH64_OPND_NIL;
1639 /* When <imms> >= <immr>, the instruction written:
1640 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1642 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1645 convert_bfm_to_bfx (aarch64_inst *inst)
1649 immr = inst->operands[2].imm.value;
1650 imms = inst->operands[3].imm.value;
1654 inst->operands[2].imm.value = lsb;
1655 inst->operands[3].imm.value = imms + 1 - lsb;
1656 /* The two opcodes have different qualifiers for
1657 the immediate operands; reset to help the checking. */
1658 reset_operand_qualifier (inst, 2);
1659 reset_operand_qualifier (inst, 3);
1666 /* When <imms> < <immr>, the instruction written:
1667 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1669 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1672 convert_bfm_to_bfi (aarch64_inst *inst)
1674 int64_t immr, imms, val;
1676 immr = inst->operands[2].imm.value;
1677 imms = inst->operands[3].imm.value;
1678 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1681 inst->operands[2].imm.value = (val - immr) & (val - 1);
1682 inst->operands[3].imm.value = imms + 1;
1683 /* The two opcodes have different qualifiers for
1684 the immediate operands; reset to help the checking. */
1685 reset_operand_qualifier (inst, 2);
1686 reset_operand_qualifier (inst, 3);
1693 /* The instruction written:
1694 BFC <Xd>, #<lsb>, #<width>
1696 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1699 convert_bfm_to_bfc (aarch64_inst *inst)
1701 int64_t immr, imms, val;
1703 /* Should have been assured by the base opcode value. */
1704 assert (inst->operands[1].reg.regno == 0x1f);
1706 immr = inst->operands[2].imm.value;
1707 imms = inst->operands[3].imm.value;
1708 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1711 /* Drop XZR from the second operand. */
1712 copy_operand_info (inst, 1, 2);
1713 copy_operand_info (inst, 2, 3);
1714 inst->operands[3].type = AARCH64_OPND_NIL;
1716 /* Recalculate the immediates. */
1717 inst->operands[1].imm.value = (val - immr) & (val - 1);
1718 inst->operands[2].imm.value = imms + 1;
1720 /* The two opcodes have different qualifiers for the operands; reset to
1721 help the checking. */
1722 reset_operand_qualifier (inst, 1);
1723 reset_operand_qualifier (inst, 2);
1724 reset_operand_qualifier (inst, 3);
1732 /* The instruction written:
1733 LSL <Xd>, <Xn>, #<shift>
1735 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1738 convert_ubfm_to_lsl (aarch64_inst *inst)
1740 int64_t immr = inst->operands[2].imm.value;
1741 int64_t imms = inst->operands[3].imm.value;
1743 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1745 if ((immr == 0 && imms == val) || immr == imms + 1)
1747 inst->operands[3].type = AARCH64_OPND_NIL;
1748 inst->operands[2].imm.value = val - imms;
1755 /* CINC <Wd>, <Wn>, <cond>
1757 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1758 where <cond> is not AL or NV. */
1761 convert_from_csel (aarch64_inst *inst)
1763 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
1764 && (inst->operands[3].cond->value & 0xe) != 0xe)
1766 copy_operand_info (inst, 2, 3);
1767 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1768 inst->operands[3].type = AARCH64_OPND_NIL;
1774 /* CSET <Wd>, <cond>
1776 CSINC <Wd>, WZR, WZR, invert(<cond>)
1777 where <cond> is not AL or NV. */
1780 convert_csinc_to_cset (aarch64_inst *inst)
1782 if (inst->operands[1].reg.regno == 0x1f
1783 && inst->operands[2].reg.regno == 0x1f
1784 && (inst->operands[3].cond->value & 0xe) != 0xe)
1786 copy_operand_info (inst, 1, 3);
1787 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1788 inst->operands[3].type = AARCH64_OPND_NIL;
1789 inst->operands[2].type = AARCH64_OPND_NIL;
1797 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1799 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1800 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1801 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1802 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1803 machine-instruction mnemonic must be used. */
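/* For instance (illustrative): MOVZ W0, #1, LSL #16 is shown as
   MOV W0, #0x10000, whereas MOVZ W0, #0, LSL #16 must keep the MOVZ
   mnemonic because a zero immediate with a non-zero shift is excluded. */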
1806 convert_movewide_to_mov (aarch64_inst *inst)
1808 uint64_t value = inst->operands[1].imm.value;
1809 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1810 if (value == 0 && inst->operands[1].shifter.amount != 0)
1812 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1813 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1814 value <<= inst->operands[1].shifter.amount;
1815 /* As an alias converter, it has to be clear that the INST->OPCODE
1816 is the opcode of the real instruction. */
1817 if (inst->opcode->op == OP_MOVN)
1819 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1821 /* A MOVN has an immediate that could be encoded by MOVZ. */
1822 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1825 inst->operands[1].imm.value = value;
1826 inst->operands[1].shifter.amount = 0;
1832 ORR <Wd>, WZR, #<imm>.
1834 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1835 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1836 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1837 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1838 machine-instruction mnemonic must be used. */
1841 convert_movebitmask_to_mov (aarch64_inst *inst)
1846 /* Should have been assured by the base opcode value. */
1847 assert (inst->operands[1].reg.regno == 0x1f);
1848 copy_operand_info (inst, 1, 2);
1849 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1850 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1851 value = inst->operands[1].imm.value;
1852 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1854 if (inst->operands[0].reg.regno != 0x1f
1855 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1856 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1859 inst->operands[2].type = AARCH64_OPND_NIL;
1863 /* Some alias opcodes are disassembled by being converted from their real form.
1864 N.B. INST->OPCODE is the real opcode rather than the alias. */
1867 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1873 return convert_bfm_to_sr (inst);
1875 return convert_ubfm_to_lsl (inst);
1879 return convert_from_csel (inst);
1882 return convert_csinc_to_cset (inst);
1886 return convert_bfm_to_bfx (inst);
1890 return convert_bfm_to_bfi (inst);
1892 return convert_bfm_to_bfc (inst);
1894 return convert_orr_to_mov (inst);
1895 case OP_MOV_IMM_WIDE:
1896 case OP_MOV_IMM_WIDEN:
1897 return convert_movewide_to_mov (inst);
1898 case OP_MOV_IMM_LOG:
1899 return convert_movebitmask_to_mov (inst);
1901 return convert_extr_to_ror (inst);
1906 return convert_shll_to_xtl (inst);
1912 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1913 aarch64_inst *, int);
1915 /* Given the instruction information in *INST, check if the instruction has
1916 any alias form that can be used to represent *INST. If the answer is yes,
1917 update *INST to be in the form of the determined alias. */
1919 /* In the opcode description table, the following flags are used in opcode
1920 entries to help establish the relations between the real and alias opcodes:
1922 F_ALIAS: opcode is an alias
1923 F_HAS_ALIAS: opcode has alias(es)
1926 F_P3: Disassembly preference priority 1-3 (the larger the
1927 higher). If nothing is specified, it is the priority
1928 0 by default, i.e. the lowest priority.
1930 Although the relation between the machine and the alias instructions is not
1931 explicitly described, it can be easily determined from the base opcode
1932 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1933 description entries:
1935 The mask of an alias opcode must be equal to or a super-set (i.e. more
1936 constrained) of that of the aliased opcode; so is the base opcode value.
1938 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1939 && (opcode->mask & real->mask) == real->mask
1940 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1941 then OPCODE is an alias of, and only of, the REAL instruction
1943 The alias relationship is forced to be flat-structured to keep the related
1944 algorithm simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1946 During the disassembling, the decoding decision tree (in
1947 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1948 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1949 not specified), the disassembler will check whether any alias instruction
1950 exists for this real instruction. If there is, the disassembler
1951 will try to disassemble the 32-bit binary again using the alias's rule, or
1952 try to convert the IR to the form of the alias. In the case of multiple
1953 aliases, the aliases are tried one by one from the highest priority
1954 (currently the flag F_P3) to the lowest priority (no priority flag), and the
1955 first one that succeeds is adopted.
1957 You may ask why there is a need for the conversion of IR from one form to
1958 another in handling certain aliases. This is because on one hand it avoids
1959 adding more operand code to handle unusual encoding/decoding; on the other
1960 hand, during the disassembling, the conversion is an effective approach to
1961 check the condition of an alias (as an alias may be adopted only if certain
1962 conditions are met).
1964 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1965 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1966 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
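/* For example (illustrative): LSL <Xd>, <Xn>, #<shift> is an F_ALIAS entry
   whose real opcode is UBFM, so after a UBFM word is decoded the disassembler
   tries convert_ubfm_to_lsl before settling on a mnemonic. */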
1969 determine_disassembling_preference (struct aarch64_inst *inst)
1971 const aarch64_opcode *opcode;
1972 const aarch64_opcode *alias;
1974 opcode = inst->opcode;
1976 /* This opcode does not have an alias, so use itself. */
1977 if (opcode_has_alias (opcode) == FALSE)
1980 alias = aarch64_find_alias_opcode (opcode);
1983 #ifdef DEBUG_AARCH64
1986 const aarch64_opcode *tmp = alias;
1987 printf ("#### LIST ordered: ");
1990 printf ("%s, ", tmp->name);
1991 tmp = aarch64_find_next_alias_opcode (tmp);
1995 #endif /* DEBUG_AARCH64 */
1997 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1999 DEBUG_TRACE ("try %s", alias->name);
2000 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2002 /* An alias can be a pseudo opcode which will never be used in the
2003 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2005 if (pseudo_opcode_p (alias))
2007 DEBUG_TRACE ("skip pseudo %s", alias->name);
2011 if ((inst->value & alias->mask) != alias->opcode)
2013 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2016 /* No need to do any complicated transformation on operands, if the alias
2017 opcode does not have any operand. */
2018 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2020 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2021 aarch64_replace_opcode (inst, alias);
2024 if (alias->flags & F_CONV)
2027 memcpy (©, inst, sizeof (aarch64_inst));
2028 /* ALIAS is the preference as long as the instruction can be
2029 successfully converted to the form of ALIAS. */
2030 if (convert_to_alias (©, alias) == 1)
2032 aarch64_replace_opcode (©, alias);
2033 assert (aarch64_match_operands_constraint (©, NULL));
2034 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2035 memcpy (inst, ©, sizeof (aarch64_inst));
2041 /* Directly decode the alias opcode. */
2043 memset (&temp, '\0', sizeof (aarch64_inst));
2044 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2046 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2047 memcpy (inst, &temp, sizeof (aarch64_inst));
2054 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2055 fails, which means that CODE is not an instruction of OPCODE; otherwise
2058 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2059 determined and used to disassemble CODE; this is done just before the
2063 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2064 aarch64_inst *inst, int noaliases_p)
2068 DEBUG_TRACE ("enter with %s", opcode->name);
2070 assert (opcode && inst);
2072 /* Check the base opcode. */
2073 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2075 DEBUG_TRACE ("base opcode match FAIL");
2080 memset (inst, '\0', sizeof (aarch64_inst));
2082 inst->opcode = opcode;
2085 /* Assign operand codes and indexes. */
2086 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2088 if (opcode->operands[i] == AARCH64_OPND_NIL)
2090 inst->operands[i].type = opcode->operands[i];
2091 inst->operands[i].idx = i;
2094 /* Call the opcode decoder indicated by flags. */
2095 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2097 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2101 /* Call operand decoders. */
2102 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2104 const aarch64_operand *opnd;
2105 enum aarch64_opnd type;
2107 type = opcode->operands[i];
2108 if (type == AARCH64_OPND_NIL)
2110 opnd = &aarch64_operands[type];
2111 if (operand_has_extractor (opnd)
2112 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2114 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2119 /* If the opcode has a verifier, then check it now. */
2120 if (opcode->verifier && ! opcode->verifier (opcode, code))
2122 DEBUG_TRACE ("operand verifier FAIL");
2126 /* Match the qualifiers. */
2127 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2129 /* Arriving here, the CODE has been determined to be a valid instruction
2130 of OPCODE and *INST has been filled with information of this OPCODE
2131 instruction. Before returning, check if the instruction has any
2132 alias and should be disassembled in the form of its alias instead.
2133 If the answer is yes, *INST will be updated. */
2135 determine_disassembling_preference (inst);
2136 DEBUG_TRACE ("SUCCESS");
2141 DEBUG_TRACE ("constraint matching FAIL");
2148 /* This does some user-friendly fix-up to *INST. It currently focuses on
2149 the adjustment of qualifiers to help the printed instruction be
2150 recognized/understood more easily. */
2153 user_friendly_fixup (aarch64_inst *inst)
2155 switch (inst->opcode->iclass)
2158 /* TBNZ Xn|Wn, #uimm6, label
2159 Test and Branch Not Zero: conditionally jumps to label if bit number
2160 uimm6 in register Xn is not zero. The bit number implies the width of
2161 the register, which may be written and should be disassembled as Wn if
2162 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2164 if (inst->operands[1].imm.value < 32)
2165 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2171 /* Decode INSN and fill in *INST with the instruction information. An alias
2172 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2176 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2177 bfd_boolean noaliases_p)
2179 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2181 #ifdef DEBUG_AARCH64
2184 const aarch64_opcode *tmp = opcode;
2186 DEBUG_TRACE ("opcode lookup:");
2189 aarch64_verbose (" %s", tmp->name);
2190 tmp = aarch64_find_next_opcode (tmp);
2193 #endif /* DEBUG_AARCH64 */
2195 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2196 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2197 opcode field and value, apart from the difference that one of them has an
2198 extra field as part of the opcode, but such a field is used for operand
2199 encoding in other opcode(s) ('immh' in the case of the example). */
2200 while (opcode != NULL)
2202 /* But only one opcode can be decoded successfully for, as the
2203 decoding routine will check the constraint carefully. */
2204 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2206 opcode = aarch64_find_next_opcode (opcode);
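
/* Illustrative sketch only (not part of the build); WORD below stands for
   a 32-bit instruction value obtained elsewhere:

     aarch64_inst inst;

     if (aarch64_decode_insn (word, &inst, FALSE) == ERR_OK)
       printf ("%s\n", inst.opcode->name);

   On success inst.opcode and inst.operands[] describe the instruction;
   passing TRUE instead of FALSE suppresses the alias conversion.  */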

/* Print operands.  */

static void
print_operands (bfd_vma pc, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds, struct disassemble_info *info)
{
  int i, pcrel_p, num_printed;
  for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      /* The opcode operand info is the primary reference; however, we also
	 look into inst->operands to support the disassembling of the
	 optional operand.
	 The two operand codes should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
			     &info->target);

      /* Print the delimiter (taking account of omitted operand(s)).  */
      if (str[0] != '\0')
	(*info->fprintf_func) (info->stream, "%s",
			       num_printed++ == 0 ? "\t" : ", ");

      /* Print the operand.  */
      if (pcrel_p)
	(*info->print_address_func) (info->target, info);
      else
	(*info->fprintf_func) (info->stream, "%s", str);
    }
}
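
/* The delimiter logic above means the first printed operand is preceded by
   a single tab and later ones by ", ", giving output of the form
   "\tx0, x1, x2" after the mnemonic; an operand whose string comes back
   empty (an omitted optional operand) does not consume a delimiter.  */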

/* Print the instruction mnemonic name.  */

static void
print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
{
  if (inst->opcode->flags & F_COND)
    {
      /* For instructions that are truly conditionally executed, e.g. b.cond,
	 prepare the full mnemonic name with the corresponding condition
	 suffix.  */
      char name[8], *ptr;
      size_t len;

      ptr = strchr (inst->opcode->name, '.');
      assert (ptr && inst->cond);
      len = ptr - inst->opcode->name;
      assert (len < 8);
      strncpy (name, inst->opcode->name, len);
      name[len] = '\0';
      (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
    }
  else
    (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
}
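
/* For instance, the "b.cond" template combined with a condition code of NE
   is printed as "b.ne": the base name is everything before the '.' and the
   suffix comes from inst->cond->names[0].  */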

/* Print the instruction according to *INST.  */

static void
print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
		    struct disassemble_info *info)
{
  print_mnemonic_name (inst, info);
  print_operands (pc, inst->opcode, inst->operands, info);
}

/* Entry-point of the instruction disassembler and printer.  */

static void
print_insn_aarch64_word (bfd_vma pc,
			 uint32_t word,
			 struct disassemble_info *info)
{
  static const char *err_msg[6] =
    {
      [ERR_OK]   = "_",
      [-ERR_UND] = "undefined",
      [-ERR_UNP] = "unpredictable",
      [-ERR_NYI] = "NYI"
    };

  int ret;
  aarch64_inst inst;

  info->insn_info_valid = 1;
  info->branch_delay_insns = 0;
  info->data_size = 0;
  info->target = 0;
  info->target2 = 0;

  if (info->flags & INSN_HAS_RELOC)
    /* If the instruction has a reloc associated with it, then
       the offset field in the instruction will actually be the
       addend for the reloc.  (If we are using REL type relocs).
       In such cases, we can ignore the pc when computing
       addresses, since the addend is not currently pc-relative.  */
    pc = 0;

  ret = aarch64_decode_insn (word, &inst, no_aliases);

  if (((word >> 21) & 0x3ff) == 1)
    {
      /* RESERVED for ALES.  */
      assert (ret != ERR_OK);
      ret = ERR_NYI;
    }

  switch (ret)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      /* Handle undefined instructions.  */
      info->insn_type = dis_noninsn;
      (*info->fprintf_func) (info->stream, ".inst\t0x%08x ; %s",
			     word, err_msg[-ret]);
      break;
    case ERR_OK:
      user_friendly_fixup (&inst);
      print_aarch64_insn (pc, &inst, info);
      break;
    default:
      abort ();
    }
}
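
/* A word that fails to decode is still printed, as an ".inst" directive
   carrying the raw value followed by a short reason such as "undefined",
   so the listing remains a faithful record of the byte stream.  */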

/* Disallow mapping symbols ($x, $d etc) from
   being displayed in symbol relative addresses.  */

bfd_boolean
aarch64_symbol_is_valid (asymbol * sym,
			 struct disassemble_info * info ATTRIBUTE_UNUSED)
{
  const char * name;

  if (sym == NULL)
    return FALSE;

  name = bfd_asymbol_name (sym);

  return name
    && (name[0] != '$'
	|| (name[1] != 'x' && name[1] != 'd')
	|| (name[2] != '\0' && name[2] != '.'));
}
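
/* In effect, names such as "$x", "$d" or "$x.foo" are rejected here, so
   that addresses are shown relative to a real symbol rather than to a
   mapping symbol.  */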

/* Print data bytes on INFO->STREAM.  */

static void
print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
		 uint32_t word,
		 struct disassemble_info *info)
{
  switch (info->bytes_per_chunk)
    {
    case 1:
      info->fprintf_func (info->stream, ".byte\t0x%02x", word);
      break;
    case 2:
      info->fprintf_func (info->stream, ".short\t0x%04x", word);
      break;
    case 4:
      info->fprintf_func (info->stream, ".word\t0x%08x", word);
      break;
    default:
      abort ();
    }
}
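
/* For example, with bytes_per_chunk == 2 a data chunk of 0x1234 is printed
   as ".short\t0x1234"; chunk sizes other than 1, 2 or 4 are a caller
   error.  */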

/* Try to infer the code or data type from a symbol.
   Returns nonzero if *MAP_TYPE was set.  */

static int
get_sym_code_type (struct disassemble_info *info, int n,
		   enum map_type *map_type)
{
  elf_symbol_type *es;
  unsigned int type;
  const char *name;

  es = *(elf_symbol_type **)(info->symtab + n);
  type = ELF_ST_TYPE (es->internal_elf_sym.st_info);

  /* If the symbol has function type then use that.  */
  if (type == STT_FUNC)
    {
      *map_type = MAP_INSN;
      return TRUE;
    }

  /* Check for mapping symbols.  */
  name = bfd_asymbol_name (info->symtab[n]);
  if (name[0] == '$'
      && (name[1] == 'x' || name[1] == 'd')
      && (name[2] == '\0' || name[2] == '.'))
    {
      *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
      return TRUE;
    }

  return FALSE;
}
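
/* The "$x" and "$d" names tested above are the AArch64 ELF mapping symbols:
   "$x" marks the start of a region of A64 code and "$d" the start of a
   region of literal data, optionally followed by a '.'-separated suffix.  */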

/* Entry-point of the AArch64 disassembler.  */

int
print_insn_aarch64 (bfd_vma pc,
		    struct disassemble_info *info)
{
  bfd_byte buffer[INSNLEN];
  int status;
  void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
  bfd_boolean found = FALSE;
  unsigned int size = 4;
  unsigned long data;

  if (info->disassembler_options)
    {
      set_default_aarch64_dis_options (info);

      parse_aarch64_dis_options (info->disassembler_options);

      /* To avoid repeated parsing of these options, we remove them here.  */
      info->disassembler_options = NULL;
    }

  /* AArch64 instructions are always little-endian.  */
  info->endian_code = BFD_ENDIAN_LITTLE;

  /* First check the full symtab for a mapping symbol, even if there
     are no usable non-mapping symbols for this address.  */
  if (info->symtab_size != 0
      && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
    {
      enum map_type type = MAP_INSN;
      int last_sym = -1;
      bfd_vma addr;
      int n;

      if (pc <= last_mapping_addr)
	last_mapping_sym = -1;

      /* Start scanning at the start of the function, or wherever
	 we finished last time.  */
      n = info->symtab_pos + 1;
      if (n < last_mapping_sym)
	n = last_mapping_sym;

      /* Scan up to the location being disassembled.  */
      for (; n < info->symtab_size; n++)
	{
	  addr = bfd_asymbol_value (info->symtab[n]);
	  if (addr > pc)
	    break;
	  if ((info->section == NULL
	       || info->section == info->symtab[n]->section)
	      && get_sym_code_type (info, n, &type))
	    {
	      last_sym = n;
	      found = TRUE;
	    }
	}

      if (!found)
	{
	  n = info->symtab_pos;
	  if (n < last_mapping_sym)
	    n = last_mapping_sym;

	  /* No mapping symbol found at this address.  Look backwards
	     for a preceding one.  */
	  for (; n >= 0; n--)
	    {
	      if (get_sym_code_type (info, n, &type))
		{
		  last_sym = n;
		  found = TRUE;
		  break;
		}
	    }
	}

      last_mapping_sym = last_sym;
      last_type = type;

      /* Look a little bit ahead to see if we should print out
	 less than four bytes of data.  If there's a symbol,
	 mapping or otherwise, after two bytes then don't
	 print more.  */
      if (last_type == MAP_DATA)
	{
	  size = 4 - (pc & 3);
	  for (n = last_sym + 1; n < info->symtab_size; n++)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr > pc)
		{
		  if (addr - pc < size)
		    size = addr - pc;
		  break;
		}
	    }

	  /* If the next symbol is after three bytes, we need to
	     print only part of the data, so that we can use either
	     .byte or .short.  */
	  if (size == 3)
	    size = (pc & 1) ? 1 : 2;
	}
    }

  if (last_type == MAP_DATA)
    {
      /* size was set above.  */
      info->bytes_per_chunk = size;
      info->display_endian = info->endian;
      printer = print_insn_data;
    }
  else
    {
      info->bytes_per_chunk = size = INSNLEN;
      info->display_endian = info->endian_code;
      printer = print_insn_aarch64_word;
    }
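
  /* As a worked example of the sizing above: disassembling a data ($d)
     region at an address with (pc & 3) == 2 gives an initial size of 2, so
     the chunk is printed with ".short"; a size of 3 is split so that only
     ".byte" or ".short" directives are ever needed.  */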

  status = (*info->read_memory_func) (pc, buffer, size, info);
  if (status != 0)
    {
      (*info->memory_error_func) (status, pc, info);
      return -1;
    }

  data = bfd_get_bits (buffer, size * 8,
		       info->display_endian == BFD_ENDIAN_BIG);

  (*printer) (pc, data, info);

  return size;
}
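
/* Rough usage sketch (not part of the build): callers normally reach this
   entry point through the opcodes/BFD callback interface.  The buffer name
   and values below are hypothetical, and the exact dis-asm.h set-up may
   differ between binutils versions:

     struct disassemble_info info;

     init_disassemble_info (&info, stdout, (fprintf_ftype) fprintf);
     info.arch = bfd_arch_aarch64;
     info.buffer = code_bytes;
     info.buffer_vma = 0x400000;
     info.buffer_length = code_size;
     disassemble_init_for_target (&info);

     print_insn_aarch64 (info.buffer_vma, &info);
*/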

void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
  no-aliases         Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
  aliases            Do print instruction aliases.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
  debug_dump         Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}
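
/* For reference, these options reach the library from the objdump command
   line, e.g. "objdump -d -M no-aliases file.o"; multiple options can be
   given as a single comma-separated -M argument, as the help text above
   says.  */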