1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
48 static int no_aliases = 0;	/* If set, disassemble as the most general inst. */
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
66 if (CONST_STRNEQ (option, "aliases"))
73 if (CONST_STRNEQ (option, "debug_dump"))
78 #endif /* DEBUG_AARCH64 */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
85 parse_aarch64_dis_options (const char *options)
87 const char *option_end;
92 while (*options != '\0')
94 /* Skip empty options. */
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
106 parse_aarch64_dis_option (options, option_end - options);
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
114 /* Functions doing the instruction disassembling. */
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields; the VALUE will be extracted from CODE accordingly and returned.
118 MASK can be zero or the base mask of the opcode.
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M; in such cases, the fields H:L:M should be passed in
124 the order H, L, M. */
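/* Worked example (illustrative, not from the original source): a by-element
   lane index encoded in H:L, with H the most significant field, is extracted
   by the callers below as
     extract_fields (code, 0, 2, FLD_H, FLD_L);
   which evaluates to (H << 1) | L.  */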
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
135 num = va_arg (va, uint32_t);
137 aarch64_insn value = 0x0;
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
156 enum aarch64_field_kind kind;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
172 uint32_t ret = value;
175 if ((value >> i) & 0x1)
177 uint32_t val = (uint32_t)(-1) << i;
180 return (int32_t) ret;
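/* Worked example (illustrative): sign_extend (0x1f5, 8) finds bit 8 set,
   ORs in 0xffffff00 and returns -11, the two's-complement value of the
   9-bit immediate 0x1f5.  */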
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
204 /* Instructions using vector type 2H should not call this function. Skip over the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
225 /* Given the instruction in *INST, which is probably half way through the
226 decoding, our caller wants to know the expected qualifier for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
238 return qualifiers[i];
240 return AARCH64_OPND_QLF_NIL;
243 /* Operand extractors. */
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
250 info->reg.regno = extract_field (self->fields[0], code, 0);
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
259 assert (info->idx == 1
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
265 /* e.g. IC <ic_op>{, <Xt>}. */
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
290 info->reglane.regno = extract_field (self->fields[0], code,
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
311 /* Index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>]; the lowest set bit of imm5<3:0> selects the element size and the bits above it give the index. */
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
328 else if (inst->opcode->iclass == dotproduct)
330 /* Need information in other operand(s) to help decoding. */
331 info->qualifier = get_expected_qualifier (inst, info->idx);
332 switch (info->qualifier)
334 case AARCH64_OPND_QLF_S_B:
336 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
337 info->reglane.regno &= 0x1f;
343 else if (inst->opcode->iclass == cryptosm3)
345 /* Index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */
346 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
350 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
351 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
353 /* Need information in other operand(s) to help decoding. */
354 info->qualifier = get_expected_qualifier (inst, info->idx);
355 switch (info->qualifier)
357 case AARCH64_OPND_QLF_S_H:
359 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
361 info->reglane.regno &= 0xf;
363 case AARCH64_OPND_QLF_S_S:
365 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
367 case AARCH64_OPND_QLF_S_D:
369 info->reglane.index = extract_field (FLD_H, code, 0);
375 if (inst->opcode->op == OP_FCMLA_ELEM)
377 /* Complex operand takes two elements. */
378 if (info->reglane.index & 1)
380 info->reglane.index /= 2;
388 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
389 const aarch64_insn code,
390 const aarch64_inst *inst ATTRIBUTE_UNUSED)
393 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
395 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
399 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
401 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
402 aarch64_opnd_info *info, const aarch64_insn code,
403 const aarch64_inst *inst)
406 /* Number of elements in each structure to be loaded/stored. */
407 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
411 unsigned is_reserved;
413 unsigned num_elements;
429 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
431 value = extract_field (FLD_opcode, code, 0);
432 /* PR 21595: Check for a bogus value. */
433 if (value >= ARRAY_SIZE (data))
435 if (expected_num != data[value].num_elements || data[value].is_reserved)
437 info->reglist.num_regs = data[value].num_regs;
442 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
443 lanes instructions. */
445 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
446 aarch64_opnd_info *info, const aarch64_insn code,
447 const aarch64_inst *inst)
452 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
454 value = extract_field (FLD_S, code, 0);
456 /* Number of registers is equal to the number of elements in
457 each structure to be loaded/stored. */
458 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
459 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
461 /* Except when it is LD1R. */
462 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
463 info->reglist.num_regs = 2;
468 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
469 load/store single element instructions. */
471 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
472 aarch64_opnd_info *info, const aarch64_insn code,
473 const aarch64_inst *inst ATTRIBUTE_UNUSED)
475 aarch64_field field = {0, 0};
476 aarch64_insn QSsize; /* fields Q:S:size. */
477 aarch64_insn opcodeh2; /* opcode<2:1> */
480 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
482 /* Decode the index, opcode<2:1> and size. */
483 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
484 opcodeh2 = extract_field_2 (&field, code, 0);
485 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
489 info->qualifier = AARCH64_OPND_QLF_S_B;
490 /* Index encoded in "Q:S:size". */
491 info->reglist.index = QSsize;
497 info->qualifier = AARCH64_OPND_QLF_S_H;
498 /* Index encoded in "Q:S:size<1>". */
499 info->reglist.index = QSsize >> 1;
502 if ((QSsize >> 1) & 0x1)
505 if ((QSsize & 0x1) == 0)
507 info->qualifier = AARCH64_OPND_QLF_S_S;
508 /* Index encoded in "Q:S". */
509 info->reglist.index = QSsize >> 2;
513 if (extract_field (FLD_S, code, 0))
516 info->qualifier = AARCH64_OPND_QLF_S_D;
517 /* Index encoded in "Q". */
518 info->reglist.index = QSsize >> 3;
525 info->reglist.has_index = 1;
526 info->reglist.num_regs = 0;
527 /* Number of registers is equal to the number of elements in
528 each structure to be loaded/stored. */
529 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
530 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
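/* Worked example (illustrative): for the B-element form the lane index is
   simply Q:S:size, so Q = 1, S = 0, size = 0b10 gives index 10; for the
   S-element form size<0> must be 0 and the index is Q:S (QSsize >> 2).  */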
535 /* Decode fields immh:immb and/or Q for e.g.
536 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
537 or SSHR <V><d>, <V><n>, #<shift>. */
540 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
541 aarch64_opnd_info *info, const aarch64_insn code,
542 const aarch64_inst *inst)
545 aarch64_insn Q, imm, immh;
546 enum aarch64_insn_class iclass = inst->opcode->iclass;
548 immh = extract_field (FLD_immh, code, 0);
551 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
553 /* Get highest set bit in immh. */
554 while (--pos >= 0 && (immh & 0x8) == 0)
557 assert ((iclass == asimdshf || iclass == asisdshf)
558 && (info->type == AARCH64_OPND_IMM_VLSR
559 || info->type == AARCH64_OPND_IMM_VLSL));
561 if (iclass == asimdshf)
563 Q = extract_field (FLD_Q, code, 0);
565 0000 x SEE AdvSIMD modified immediate
575 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
578 info->qualifier = get_sreg_qualifier_from_value (pos);
580 if (info->type == AARCH64_OPND_IMM_VLSR)
582 0000 SEE AdvSIMD modified immediate
583 0001 (16-UInt(immh:immb))
584 001x (32-UInt(immh:immb))
585 01xx (64-UInt(immh:immb))
586 1xxx (128-UInt(immh:immb)) */
587 info->imm.value = (16 << pos) - imm;
591 0000 SEE AdvSIMD modified immediate
592 0001 (UInt(immh:immb)-8)
593 001x (UInt(immh:immb)-16)
594 01xx (UInt(immh:immb)-32)
595 1xxx (UInt(immh:immb)-64) */
596 info->imm.value = imm - (8 << pos);
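/* Worked example (illustrative): immh = 0b0010 leaves pos = 1, i.e. the
   001x rows of the tables above.  With immh:immb = 0b0010101 (21), a
   right-shift immediate decodes as 32 - 21 = 11 and a left-shift immediate
   as 21 - 16 = 5.  */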
601 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
603 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
604 aarch64_opnd_info *info, const aarch64_insn code,
605 const aarch64_inst *inst ATTRIBUTE_UNUSED)
609 val = extract_field (FLD_size, code, 0);
612 case 0: imm = 8; break;
613 case 1: imm = 16; break;
614 case 2: imm = 32; break;
617 info->imm.value = imm;
621 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
622 The value in the field(s) will be extracted as an unsigned immediate value. */
624 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
625 const aarch64_insn code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
630 imm = extract_all_fields (self, code);
632 if (operand_need_sign_extension (self))
633 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
635 if (operand_need_shift_by_two (self))
638 if (info->type == AARCH64_OPND_ADDR_ADRP)
641 info->imm.value = imm;
645 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
647 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
648 const aarch64_insn code,
649 const aarch64_inst *inst ATTRIBUTE_UNUSED)
651 aarch64_ext_imm (self, info, code, inst);
652 info->shifter.kind = AARCH64_MOD_LSL;
653 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
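/* For example (illustrative): hw = 2 gives a shifter amount of 32, i.e.
   MOVZ <Xd>, #<imm16>, LSL #32.  */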
657 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
658 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
660 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
661 aarch64_opnd_info *info,
662 const aarch64_insn code,
663 const aarch64_inst *inst ATTRIBUTE_UNUSED)
666 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
667 aarch64_field field = {0, 0};
669 assert (info->idx == 1);
671 if (info->type == AARCH64_OPND_SIMD_FPIMM)
674 /* a:b:c:d:e:f:g:h */
675 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
676 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
678 /* Either MOVI <Dd>, #<imm>
679 or MOVI <Vd>.2D, #<imm>.
680 <imm> is a 64-bit immediate
681 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
682 encoded in "a:b:c:d:e:f:g:h". */
684 unsigned abcdefgh = imm;
685 for (imm = 0ull, i = 0; i < 8; i++)
686 if (((abcdefgh >> i) & 0x1) != 0)
687 imm |= 0xffull << (8 * i);
689 info->imm.value = imm;
692 info->qualifier = get_expected_qualifier (inst, info->idx);
693 switch (info->qualifier)
695 case AARCH64_OPND_QLF_NIL:
697 info->shifter.kind = AARCH64_MOD_NONE;
699 case AARCH64_OPND_QLF_LSL:
701 info->shifter.kind = AARCH64_MOD_LSL;
702 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
704 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
705 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
706 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
707 default: assert (0); return 0;
709 /* 00: 0; 01: 8; 10: 16; 11: 24. */
710 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
712 case AARCH64_OPND_QLF_MSL:
714 info->shifter.kind = AARCH64_MOD_MSL;
715 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
716 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
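/* Worked example (illustrative): in the 64-bit MOVI case above, abcdefgh =
   0xa5 (0b10100101) expands each set bit to a 0xff byte, giving the
   immediate 0xff00ff0000ff00ff.  */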
726 /* Decode an 8-bit floating-point immediate. */
728 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
729 const aarch64_insn code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED)
732 info->imm.value = extract_all_fields (self, code);
737 /* Decode a 1-bit rotate immediate (#90 or #270). */
739 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
740 const aarch64_insn code,
741 const aarch64_inst *inst ATTRIBUTE_UNUSED)
743 uint64_t rot = extract_field (self->fields[0], code, 0);
745 info->imm.value = rot * 180 + 90;
749 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
751 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
752 const aarch64_insn code,
753 const aarch64_inst *inst ATTRIBUTE_UNUSED)
755 uint64_t rot = extract_field (self->fields[0], code, 0);
757 info->imm.value = rot * 90;
761 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
763 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
764 aarch64_opnd_info *info, const aarch64_insn code,
765 const aarch64_inst *inst ATTRIBUTE_UNUSED)
767 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
771 /* Decode arithmetic immediate for e.g.
772 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
774 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
775 aarch64_opnd_info *info, const aarch64_insn code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED)
780 info->shifter.kind = AARCH64_MOD_LSL;
782 value = extract_field (FLD_shift, code, 0);
785 info->shifter.amount = value ? 12 : 0;
786 /* imm12 (unsigned) */
787 info->imm.value = extract_field (FLD_imm12, code, 0);
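/* For example (illustrative): imm12 = 0x123 with the shift field set to 1
   decodes as #0x123, LSL #12.  */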
792 /* Return true if VALUE is a valid logical immediate encoding, storing the
793 decoded value in *RESULT if so. ESIZE is the number of bytes in the
794 decoded immediate. */
796 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
802 /* value is N:immr:imms. */
804 R = (value >> 6) & 0x3f;
805 N = (value >> 12) & 0x1;
807 /* The immediate value consists of S+1 consecutive bits set to 1, left rotated
808 by SIMDsize - R (in other words, right rotated by R), then replicated. */
812 mask = 0xffffffffffffffffull;
818 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
819 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
820 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
821 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
822 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
825 mask = (1ull << simd_size) - 1;
826 /* Top bits are IGNORED. */
830 if (simd_size > esize * 8)
833 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
834 if (S == simd_size - 1)
836 /* S+1 consecutive bits to 1. */
837 /* NOTE: S can't be 63 due to detection above. */
838 imm = (1ull << (S + 1)) - 1;
839 /* Rotate to the left by simd_size - R. */
841 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
842 /* Replicate the value according to SIMD size. */
845 case 2: imm = (imm << 2) | imm;
847 case 4: imm = (imm << 4) | imm;
849 case 8: imm = (imm << 8) | imm;
851 case 16: imm = (imm << 16) | imm;
853 case 32: imm = (imm << 32) | imm;
856 default: assert (0); return 0;
859 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
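/* Worked example (illustrative): with ESIZE = 4 and VALUE (N:immr:imms)
   = 0x03c, imms = 0b111100 selects simd_size = 2 with S = 0, so the element
   pattern is 0b01; R = 0 leaves it unrotated and replication across 32 bits
   gives 0x55555555.  */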
864 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
866 aarch64_ext_limm (const aarch64_operand *self,
867 aarch64_opnd_info *info, const aarch64_insn code,
868 const aarch64_inst *inst)
873 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
875 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
876 return decode_limm (esize, value, &info->imm.value);
879 /* Decode a logical immediate for the BIC alias of AND (etc.). */
881 aarch64_ext_inv_limm (const aarch64_operand *self,
882 aarch64_opnd_info *info, const aarch64_insn code,
883 const aarch64_inst *inst)
885 if (!aarch64_ext_limm (self, info, code, inst))
887 info->imm.value = ~info->imm.value;
891 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
892 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
894 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
895 aarch64_opnd_info *info,
896 const aarch64_insn code, const aarch64_inst *inst)
901 info->reg.regno = extract_field (FLD_Rt, code, 0);
904 value = extract_field (FLD_ldst_size, code, 0);
905 if (inst->opcode->iclass == ldstpair_indexed
906 || inst->opcode->iclass == ldstnapair_offs
907 || inst->opcode->iclass == ldstpair_off
908 || inst->opcode->iclass == loadlit)
910 enum aarch64_opnd_qualifier qualifier;
913 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
914 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
915 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
918 info->qualifier = qualifier;
923 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
926 info->qualifier = get_sreg_qualifier_from_value (value);
932 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
934 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
935 aarch64_opnd_info *info,
937 const aarch64_inst *inst ATTRIBUTE_UNUSED)
940 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
944 /* Decode the address operand for e.g.
945 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
947 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
948 aarch64_opnd_info *info,
949 aarch64_insn code, const aarch64_inst *inst)
951 info->qualifier = get_expected_qualifier (inst, info->idx);
954 info->addr.base_regno = extract_field (self->fields[0], code, 0);
957 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
958 info->addr.offset.imm = sign_extend (imm, 8);
959 if (extract_field (self->fields[2], code, 0) == 1) {
960 info->addr.writeback = 1;
961 info->addr.preind = 1;
966 /* Decode the address operand for e.g.
967 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
969 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
970 aarch64_opnd_info *info,
971 aarch64_insn code, const aarch64_inst *inst)
973 aarch64_insn S, value;
976 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
978 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
980 value = extract_field (FLD_option, code, 0);
982 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
983 /* Fix-up the shifter kind; although the table-driven approach is
984 efficient, it is slightly inflexible, thus needing this fix-up. */
985 if (info->shifter.kind == AARCH64_MOD_UXTX)
986 info->shifter.kind = AARCH64_MOD_LSL;
988 S = extract_field (FLD_S, code, 0);
991 info->shifter.amount = 0;
992 info->shifter.amount_present = 0;
997 /* Need information in other operand(s) to help achieve the decoding. */
999 info->qualifier = get_expected_qualifier (inst, info->idx);
1000 /* Get the size of the data element that is accessed, which may be
1001 different from the source register size, e.g. in strb/ldrb. */
1002 size = aarch64_get_qualifier_esize (info->qualifier);
1003 info->shifter.amount = get_logsz (size);
1004 info->shifter.amount_present = 1;
1010 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1012 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1013 aarch64_insn code, const aarch64_inst *inst)
1016 info->qualifier = get_expected_qualifier (inst, info->idx);
1019 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1020 /* simm (imm9 or imm7) */
1021 imm = extract_field (self->fields[0], code, 0);
1022 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1023 if (self->fields[0] == FLD_imm7)
1024 /* scaled immediate in ld/st pair instructions. */
1025 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1027 if (inst->opcode->iclass == ldst_unscaled
1028 || inst->opcode->iclass == ldstnapair_offs
1029 || inst->opcode->iclass == ldstpair_off
1030 || inst->opcode->iclass == ldst_unpriv)
1031 info->addr.writeback = 0;
1034 /* pre/post- index */
1035 info->addr.writeback = 1;
1036 if (extract_field (self->fields[1], code, 0) == 1)
1037 info->addr.preind = 1;
1039 info->addr.postind = 1;
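/* Worked example (illustrative): for LDP <Xt1>, <Xt2>, the 7-bit immediate
   is scaled by the 8-byte element size, so imm7 = 0x7e sign-extends to -2
   and decodes as a byte offset of -16.  */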
1045 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1047 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1049 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1052 info->qualifier = get_expected_qualifier (inst, info->idx);
1053 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1055 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1057 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1061 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1063 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1065 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1069 info->qualifier = get_expected_qualifier (inst, info->idx);
1071 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1073 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1074 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1075 if (extract_field (self->fields[3], code, 0) == 1) {
1076 info->addr.writeback = 1;
1077 info->addr.preind = 1;
1082 /* Decode the address operand for e.g.
1083 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1085 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1086 aarch64_opnd_info *info,
1087 aarch64_insn code, const aarch64_inst *inst)
1089 /* The opcode dependent area stores the number of elements in
1090 each structure to be loaded/stored. */
1091 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1094 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1095 /* Rm | #<amount> */
1096 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1097 if (info->addr.offset.regno == 31)
1099 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1100 /* Special handling of loading a single structure to all lanes. */
1101 info->addr.offset.imm = (is_ld1r ? 1
1102 : inst->operands[0].reglist.num_regs)
1103 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1105 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1106 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1107 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1110 info->addr.offset.is_reg = 1;
1111 info->addr.writeback = 1;
1116 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1118 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1119 aarch64_opnd_info *info,
1120 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1124 value = extract_field (FLD_cond, code, 0);
1125 info->cond = get_cond_from_value (value);
1129 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1131 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1132 aarch64_opnd_info *info,
1134 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1136 /* op0:op1:CRn:CRm:op2 */
1137 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1142 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1144 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1145 aarch64_opnd_info *info, aarch64_insn code,
1146 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1150 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1151 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1152 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1154 /* Reserved value in <pstatefield>. */
1158 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1160 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1161 aarch64_opnd_info *info,
1163 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1167 const aarch64_sys_ins_reg *sysins_ops;
1168 /* op0:op1:CRn:CRm:op2 */
1169 value = extract_fields (code, 0, 5,
1170 FLD_op0, FLD_op1, FLD_CRn,
1175 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1176 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1177 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1178 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1179 default: assert (0); return 0;
1182 for (i = 0; sysins_ops[i].name != NULL; ++i)
1183 if (sysins_ops[i].value == value)
1185 info->sysins_op = sysins_ops + i;
1186 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1187 info->sysins_op->name,
1188 (unsigned)info->sysins_op->value,
1189 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1196 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1199 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1200 aarch64_opnd_info *info,
1202 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1205 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1209 /* Decode the prefetch operation option operand for e.g.
1210 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1213 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1214 aarch64_opnd_info *info,
1215 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1218 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1222 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1223 to the matching name/value pair in aarch64_hint_options. */
1226 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1227 aarch64_opnd_info *info,
1229 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1232 unsigned hint_number;
1235 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1237 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1239 if (hint_number == aarch64_hint_options[i].value)
1241 info->hint_option = &(aarch64_hint_options[i]);
1249 /* Decode the extended register operand for e.g.
1250 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1252 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1253 aarch64_opnd_info *info,
1255 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1260 info->reg.regno = extract_field (FLD_Rm, code, 0);
1262 value = extract_field (FLD_option, code, 0);
1263 info->shifter.kind =
1264 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1266 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1268 /* This makes the constraint checking happy. */
1269 info->shifter.operator_present = 1;
1271 /* Assume inst->operands[0].qualifier has been resolved. */
1272 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1273 info->qualifier = AARCH64_OPND_QLF_W;
1274 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1275 && (info->shifter.kind == AARCH64_MOD_UXTX
1276 || info->shifter.kind == AARCH64_MOD_SXTX))
1277 info->qualifier = AARCH64_OPND_QLF_X;
1282 /* Decode the shifted register operand for e.g.
1283 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1285 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1286 aarch64_opnd_info *info,
1288 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1293 info->reg.regno = extract_field (FLD_Rm, code, 0);
1295 value = extract_field (FLD_shift, code, 0);
1296 info->shifter.kind =
1297 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1298 if (info->shifter.kind == AARCH64_MOD_ROR
1299 && inst->opcode->iclass != log_shift)
1300 /* ROR is not available for the shifted register operand in arithmetic instructions. */
1304 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1306 /* This makes the constraint checking happy. */
1307 info->shifter.operator_present = 1;
1312 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1313 where <offset> is given by the OFFSET parameter and where <factor> is
1314 1 plus SELF's operand-dependent value. fields[0] specifies the field
1315 that holds <base>. */
1317 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1318 aarch64_opnd_info *info, aarch64_insn code,
1321 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1322 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1323 info->addr.offset.is_reg = FALSE;
1324 info->addr.writeback = FALSE;
1325 info->addr.preind = TRUE;
1327 info->shifter.kind = AARCH64_MOD_MUL_VL;
1328 info->shifter.amount = 1;
1329 info->shifter.operator_present = (info->addr.offset.imm != 0);
1330 info->shifter.amount_present = FALSE;
1334 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1335 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1336 SELF's operand-dependent value. fields[0] specifies the field that
1337 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1339 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1340 aarch64_opnd_info *info, aarch64_insn code,
1341 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1345 offset = extract_field (FLD_SVE_imm4, code, 0);
1346 offset = ((offset + 8) & 15) - 8;
1347 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1350 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1351 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1352 SELF's operand-dependent value. fields[0] specifies the field that
1353 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1355 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1356 aarch64_opnd_info *info, aarch64_insn code,
1357 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1361 offset = extract_field (FLD_SVE_imm6, code, 0);
1362 offset = (((offset + 32) & 63) - 32);
1363 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1366 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1367 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1368 SELF's operand-dependent value. fields[0] specifies the field that
1369 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1370 and imm3 fields, with imm3 being the less-significant part. */
1372 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1373 aarch64_opnd_info *info,
1375 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1379 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1380 offset = (((offset + 256) & 511) - 256);
1381 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1384 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1385 is given by the OFFSET parameter and where <shift> is SELF's operand-
1386 dependent value. fields[0] specifies the base register field <base>. */
1388 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1389 aarch64_opnd_info *info, aarch64_insn code,
1392 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1393 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1394 info->addr.offset.is_reg = FALSE;
1395 info->addr.writeback = FALSE;
1396 info->addr.preind = TRUE;
1397 info->shifter.operator_present = FALSE;
1398 info->shifter.amount_present = FALSE;
1402 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1403 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1404 value. fields[0] specifies the base register field. */
1406 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1407 aarch64_opnd_info *info, aarch64_insn code,
1408 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1410 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1411 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1414 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1415 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1416 value. fields[0] specifies the base register field. */
1418 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1419 aarch64_opnd_info *info, aarch64_insn code,
1420 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1422 int offset = extract_field (FLD_SVE_imm6, code, 0);
1423 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1426 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1427 is SELF's operand-dependent value. fields[0] specifies the base
1428 register field and fields[1] specifies the offset register field. */
1430 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1431 aarch64_opnd_info *info, aarch64_insn code,
1432 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1436 index_regno = extract_field (self->fields[1], code, 0);
1437 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1440 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1441 info->addr.offset.regno = index_regno;
1442 info->addr.offset.is_reg = TRUE;
1443 info->addr.writeback = FALSE;
1444 info->addr.preind = TRUE;
1445 info->shifter.kind = AARCH64_MOD_LSL;
1446 info->shifter.amount = get_operand_specific_data (self);
1447 info->shifter.operator_present = (info->shifter.amount != 0);
1448 info->shifter.amount_present = (info->shifter.amount != 0);
1452 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1453 <shift> is SELF's operand-dependent value. fields[0] specifies the
1454 base register field, fields[1] specifies the offset register field and
1455 fields[2] is a single-bit field that selects SXTW over UXTW. */
1457 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1458 aarch64_opnd_info *info, aarch64_insn code,
1459 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1461 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1462 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1463 info->addr.offset.is_reg = TRUE;
1464 info->addr.writeback = FALSE;
1465 info->addr.preind = TRUE;
1466 if (extract_field (self->fields[2], code, 0))
1467 info->shifter.kind = AARCH64_MOD_SXTW;
1469 info->shifter.kind = AARCH64_MOD_UXTW;
1470 info->shifter.amount = get_operand_specific_data (self);
1471 info->shifter.operator_present = TRUE;
1472 info->shifter.amount_present = (info->shifter.amount != 0);
1476 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1477 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1478 fields[0] specifies the base register field. */
1480 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1481 aarch64_opnd_info *info, aarch64_insn code,
1482 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1484 int offset = extract_field (FLD_imm5, code, 0);
1485 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1488 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1489 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1490 number. fields[0] specifies the base register field and fields[1]
1491 specifies the offset register field. */
1493 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1494 aarch64_insn code, enum aarch64_modifier_kind kind)
1496 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1497 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1498 info->addr.offset.is_reg = TRUE;
1499 info->addr.writeback = FALSE;
1500 info->addr.preind = TRUE;
1501 info->shifter.kind = kind;
1502 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1503 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1504 || info->shifter.amount != 0);
1505 info->shifter.amount_present = (info->shifter.amount != 0);
1509 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1510 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1511 field and fields[1] specifies the offset register field. */
1513 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1514 aarch64_opnd_info *info, aarch64_insn code,
1515 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1517 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1520 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1521 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1522 field and fields[1] specifies the offset register field. */
1524 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1525 aarch64_opnd_info *info, aarch64_insn code,
1526 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1528 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1531 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1532 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1533 field and fields[1] specifies the offset register field. */
1535 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1536 aarch64_opnd_info *info, aarch64_insn code,
1537 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1539 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1542 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1543 has the raw field value and that the low 8 bits decode to VALUE. */
1545 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1547 info->shifter.kind = AARCH64_MOD_LSL;
1548 info->shifter.amount = 0;
1549 if (info->imm.value & 0x100)
1552 /* Decode 0x100 as #0, LSL #8. */
1553 info->shifter.amount = 8;
1557 info->shifter.operator_present = (info->shifter.amount != 0);
1558 info->shifter.amount_present = (info->shifter.amount != 0);
1559 info->imm.value = value;
1563 /* Decode an SVE ADD/SUB immediate. */
1565 aarch64_ext_sve_aimm (const aarch64_operand *self,
1566 aarch64_opnd_info *info, const aarch64_insn code,
1567 const aarch64_inst *inst)
1569 return (aarch64_ext_imm (self, info, code, inst)
1570 && decode_sve_aimm (info, (uint8_t) info->imm.value));
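/* For example (illustrative): a raw field value of 0x101 decodes as #256,
   while 0x100 itself decodes as #0, LSL #8.  */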
1573 /* Decode an SVE CPY/DUP immediate. */
1575 aarch64_ext_sve_asimm (const aarch64_operand *self,
1576 aarch64_opnd_info *info, const aarch64_insn code,
1577 const aarch64_inst *inst)
1579 return (aarch64_ext_imm (self, info, code, inst)
1580 && decode_sve_aimm (info, (int8_t) info->imm.value));
1583 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1584 The fields array specifies which field to use. */
1586 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1587 aarch64_opnd_info *info, aarch64_insn code,
1588 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1590 if (extract_field (self->fields[0], code, 0))
1591 info->imm.value = 0x3f800000;
1593 info->imm.value = 0x3f000000;
1594 info->imm.is_fp = TRUE;
1598 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1599 The fields array specifies which field to use. */
1601 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1602 aarch64_opnd_info *info, aarch64_insn code,
1603 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1605 if (extract_field (self->fields[0], code, 0))
1606 info->imm.value = 0x40000000;
1608 info->imm.value = 0x3f000000;
1609 info->imm.is_fp = TRUE;
1613 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1614 The fields array specifies which field to use. */
1616 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1617 aarch64_opnd_info *info, aarch64_insn code,
1618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1620 if (extract_field (self->fields[0], code, 0))
1621 info->imm.value = 0x3f800000;
1623 info->imm.value = 0x0;
1624 info->imm.is_fp = TRUE;
1628 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1629 array specifies which field to use for Zn. MM is encoded in the
1630 concatenation of imm5 and SVE_tszh, with imm5 being the less
1631 significant part. */
1633 aarch64_ext_sve_index (const aarch64_operand *self,
1634 aarch64_opnd_info *info, aarch64_insn code,
1635 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1639 info->reglane.regno = extract_field (self->fields[0], code, 0);
1640 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1641 if ((val & 31) == 0)
1643 while ((val & 1) == 0)
1645 info->reglane.index = val / 2;
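/* Worked example (illustrative): tszh:imm5 = 0b0011100 (28) has its lowest
   set bit at position 2, i.e. an S-sized element; stripping the trailing
   zeros leaves 7, so the lane index is 7 / 2 = 3, giving Zn.S[3].  */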
1649 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1651 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1652 aarch64_opnd_info *info, const aarch64_insn code,
1653 const aarch64_inst *inst)
1655 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1656 return (aarch64_ext_limm (self, info, code, inst)
1657 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1660 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1661 and where MM occupies the most-significant part. The operand-dependent
1662 value specifies the number of bits in Zn. */
1664 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1665 aarch64_opnd_info *info, aarch64_insn code,
1666 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1668 unsigned int reg_bits = get_operand_specific_data (self);
1669 unsigned int val = extract_all_fields (self, code);
1670 info->reglane.regno = val & ((1 << reg_bits) - 1);
1671 info->reglane.index = val >> reg_bits;
1675 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1676 to use for Zn. The opcode-dependent value specifies the number
1677 of registers in the list. */
1679 aarch64_ext_sve_reglist (const aarch64_operand *self,
1680 aarch64_opnd_info *info, aarch64_insn code,
1681 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1683 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1684 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1688 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1689 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4 field. */
1692 aarch64_ext_sve_scale (const aarch64_operand *self,
1693 aarch64_opnd_info *info, aarch64_insn code,
1694 const aarch64_inst *inst)
1698 if (!aarch64_ext_imm (self, info, code, inst))
1700 val = extract_field (FLD_SVE_imm4, code, 0);
1701 info->shifter.kind = AARCH64_MOD_MUL;
1702 info->shifter.amount = val + 1;
1703 info->shifter.operator_present = (val != 0);
1704 info->shifter.amount_present = (val != 0);
1708 /* Return the top set bit in VALUE, which is expected to be relatively small. */
1711 get_top_bit (uint64_t value)
1713 while ((value & -value) != value)
1714 value -= value & -value;
1718 /* Decode an SVE shift-left immediate. */
1720 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1721 aarch64_opnd_info *info, const aarch64_insn code,
1722 const aarch64_inst *inst)
1724 if (!aarch64_ext_imm (self, info, code, inst)
1725 || info->imm.value == 0)
1728 info->imm.value -= get_top_bit (info->imm.value);
1732 /* Decode an SVE shift-right immediate. */
1734 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1735 aarch64_opnd_info *info, const aarch64_insn code,
1736 const aarch64_inst *inst)
1738 if (!aarch64_ext_imm (self, info, code, inst)
1739 || info->imm.value == 0)
1742 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
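/* Worked example (illustrative): a raw field value of 37 (0b100101) has top
   set bit 32, so a shift-left immediate decodes as 37 - 32 = 5 and a
   shift-right immediate as 2 * 32 - 37 = 27.  */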
1746 /* Bitfields that are commonly used to encode certain operands' information
1747 may be partially used as part of the base opcode in some instructions.
1748 For example, the bit 1 of the field 'size' in
1749 FCVTXN <Vb><d>, <Va><n>
1750 is actually part of the base opcode, while only size<0> is available
1751 for encoding the register type. Another example is the AdvSIMD
1752 instruction ORR (register), in which the field 'size' is also used for
1753 the base opcode, leaving only the field 'Q' available to encode the
1754 vector register arrangement specifier '8B' or '16B'.
1756 This function tries to deduce the qualifier from the value of partially
1757 constrained field(s). Given the VALUE of such a field or fields, the
1758 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1759 operand encoding), the function returns the matching qualifier or
1760 AARCH64_OPND_QLF_NIL if nothing matches.
1762 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1763 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1764 may end with AARCH64_OPND_QLF_NIL. */
1766 static enum aarch64_opnd_qualifier
1767 get_qualifier_from_partial_encoding (aarch64_insn value,
1768 const enum aarch64_opnd_qualifier* \
1773 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1774 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1776 aarch64_insn standard_value;
1777 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1779 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1780 if ((standard_value & mask) == (value & mask))
1781 return candidates[i];
1783 return AARCH64_OPND_QLF_NIL;
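/* For example (illustrative): for AdvSIMD ORR (register), 'size' is part of
   the base opcode and only Q is free, so MASK covers just the Q bit; with
   Q = 1 the candidate list { 8B, 16B } matches 16B, whose standard value
   is 0x1.  */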
1786 /* Given a list of qualifier sequences, return all possible valid qualifiers
1787 for operand IDX in QUALIFIERS.
1788 Assume QUALIFIERS is an array whose length is large enough. */
1791 get_operand_possible_qualifiers (int idx,
1792 const aarch64_opnd_qualifier_seq_t *list,
1793 enum aarch64_opnd_qualifier *qualifiers)
1796 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1797 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1801 /* Decode the size Q field for e.g. SHADD.
1802 We tag one operand with the qualifier according to the code;
1803 whether the qualifier is valid for this opcode or not is the
1804 duty of the semantic checking. */
1807 decode_sizeq (aarch64_inst *inst)
1810 enum aarch64_opnd_qualifier qualifier;
1812 aarch64_insn value, mask;
1813 enum aarch64_field_kind fld_sz;
1814 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1816 if (inst->opcode->iclass == asisdlse
1817 || inst->opcode->iclass == asisdlsep
1818 || inst->opcode->iclass == asisdlso
1819 || inst->opcode->iclass == asisdlsop)
1820 fld_sz = FLD_vldst_size;
1825 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1826 /* Obtain information about which bits of the fields Q and size are actually
1827 available for operand encoding. Opcodes like FMAXNM and FMLA have
1828 size[1] unavailable. */
1829 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1831 /* The index of the operand that we are going to tag with a qualifier, and the
1832 qualifier itself, are deduced from the value of the size and Q fields and the
1833 possible valid qualifier lists. */
1834 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1835 DEBUG_TRACE ("key idx: %d", idx);
1837 /* For most of the related instructions, size:Q is fully available for operand encoding. */
1841 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1845 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1847 #ifdef DEBUG_AARCH64
1851 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1852 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1853 DEBUG_TRACE ("qualifier %d: %s", i,
1854 aarch64_get_qualifier_name(candidates[i]));
1855 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1857 #endif /* DEBUG_AARCH64 */
1859 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1861 if (qualifier == AARCH64_OPND_QLF_NIL)
1864 inst->operands[idx].qualifier = qualifier;
1868 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1869 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1872 decode_asimd_fcvt (aarch64_inst *inst)
1874 aarch64_field field = {0, 0};
1876 enum aarch64_opnd_qualifier qualifier;
1878 gen_sub_field (FLD_size, 0, 1, &field);
1879 value = extract_field_2 (&field, inst->value, 0);
1880 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1881 : AARCH64_OPND_QLF_V_2D;
1882 switch (inst->opcode->op)
1886 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1887 inst->operands[1].qualifier = qualifier;
1891 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1892 inst->operands[0].qualifier = qualifier;
1902 /* Decode size[0], i.e. bit 22, for
1903 e.g. FCVTXN <Vb><d>, <Va><n>. */
1906 decode_asisd_fcvtxn (aarch64_inst *inst)
1908 aarch64_field field = {0, 0};
1909 gen_sub_field (FLD_size, 0, 1, &field);
1910 if (!extract_field_2 (&field, inst->value, 0))
1912 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1916 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1918 decode_fcvt (aarch64_inst *inst)
1920 enum aarch64_opnd_qualifier qualifier;
1922 const aarch64_field field = {15, 2};
1925 value = extract_field_2 (&field, inst->value, 0);
1928 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1929 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1930 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1933 inst->operands[0].qualifier = qualifier;
1938 /* Do miscellaneous decodings that are not common enough to be driven by flags. */
1942 do_misc_decoding (aarch64_inst *inst)
1945 switch (inst->opcode->op)
1948 return decode_fcvt (inst);
1954 return decode_asimd_fcvt (inst);
1957 return decode_asisd_fcvtxn (inst);
1961 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1962 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1963 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1966 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1967 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1970 /* Index must be zero. */
1971 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1972 return value > 0 && value <= 16 && value == (value & -value);
1975 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1976 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1979 /* Index must be nonzero. */
1980 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1981 return value > 0 && value != (value & -value);
1984 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1985 == extract_field (FLD_SVE_Pm, inst->value, 0));
1987 case OP_MOVZS_P_P_P:
1989 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1990 == extract_field (FLD_SVE_Pm, inst->value, 0));
1992 case OP_NOTS_P_P_P_Z:
1993 case OP_NOT_P_P_P_Z:
1994 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1995 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2002 /* Opcodes that have fields shared by multiple operands are usually flagged
2003 with flags. In this function, we detect such flags, decode the related
2004 field(s) and store the information in one of the related operands. The
2005 'one' operand is not an arbitrary operand, but one of the operands that can
2006 accommodate all the information that has been decoded. */
2009 do_special_decoding (aarch64_inst *inst)
2013 /* Condition for truly conditionally-executed instructions, e.g. b.cond. */
2014 if (inst->opcode->flags & F_COND)
2016 value = extract_field (FLD_cond2, inst->value, 0);
2017 inst->cond = get_cond_from_value (value);
2020 if (inst->opcode->flags & F_SF)
2022 idx = select_operand_for_sf_field_coding (inst->opcode);
2023 value = extract_field (FLD_sf, inst->value, 0);
2024 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2025 if ((inst->opcode->flags & F_N)
2026 && extract_field (FLD_N, inst->value, 0) != value)
2030 if (inst->opcode->flags & F_LSE_SZ)
2032 idx = select_operand_for_sf_field_coding (inst->opcode);
2033 value = extract_field (FLD_lse_sz, inst->value, 0);
2034 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2036 /* size:Q fields. */
2037 if (inst->opcode->flags & F_SIZEQ)
2038 return decode_sizeq (inst);
2040 if (inst->opcode->flags & F_FPTYPE)
2042 idx = select_operand_for_fptype_field_coding (inst->opcode);
2043 value = extract_field (FLD_type, inst->value, 0);
2046 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2047 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2048 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2053 if (inst->opcode->flags & F_SSIZE)
2055 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2056 of the base opcode. */
2058 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2059 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2060 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2061 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2062 /* For most of the related instructions, the 'size' field is fully available
2063 for operand encoding. */
2065 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2068 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2070 inst->operands[idx].qualifier
2071 = get_qualifier_from_partial_encoding (value, candidates, mask);
2075 if (inst->opcode->flags & F_T)
2077 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2080 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2081 == AARCH64_OPND_CLASS_SIMD_REG);
2092 val = extract_field (FLD_imm5, inst->value, 0);
2093 while ((val & 0x1) == 0 && ++num <= 3)
2097 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2098 inst->operands[0].qualifier =
2099 get_vreg_qualifier_from_value ((num << 1) | Q);
2102 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2104 /* Use Rt to encode in the case of e.g.
2105 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2106 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2109 /* Otherwise use the result operand, which has to be an integer
2111 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2112 == AARCH64_OPND_CLASS_INT_REG);
2115 assert (idx == 0 || idx == 1);
2116 value = extract_field (FLD_Q, inst->value, 0);
2117 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2120 if (inst->opcode->flags & F_LDS_SIZE)
2122 aarch64_field field = {0, 0};
2123 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2124 == AARCH64_OPND_CLASS_INT_REG);
2125 gen_sub_field (FLD_opc, 0, 1, &field);
2126 value = extract_field_2 (&field, inst->value, 0);
2127 inst->operands[0].qualifier
2128 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2131 /* Miscellaneous decoding; done as the last step. */
2132 if (inst->opcode->flags & F_MISC)
2133 return do_misc_decoding (inst);
2138 /* Converters converting a real opcode instruction to its alias form. */
2140 /* ROR <Wd>, <Ws>, #<shift>
2142 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2144 convert_extr_to_ror (aarch64_inst *inst)
2146 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2148 copy_operand_info (inst, 2, 3);
2149 inst->operands[3].type = AARCH64_OPND_NIL;
2155 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2157 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2159 convert_shll_to_xtl (aarch64_inst *inst)
2161 if (inst->operands[2].imm.value == 0)
2163 inst->operands[2].type = AARCH64_OPND_NIL;
2170 UBFM <Xd>, <Xn>, #<shift>, #63.
2172 LSR <Xd>, <Xn>, #<shift>. */
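/* For example, the machine instruction UBFM X0, X1, #5, #63 is shown as
   LSR X0, X1, #5; for the 32-bit variant the final immediate is #31
   rather than #63.  */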
2174 convert_bfm_to_sr (aarch64_inst *inst)
2178 imms = inst->operands[3].imm.value;
2179 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2182 inst->operands[3].type = AARCH64_OPND_NIL;
2189 /* Convert MOV to ORR. */
2191 convert_orr_to_mov (aarch64_inst *inst)
2193 /* MOV <Vd>.<T>, <Vn>.<T>
2195 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2196 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2198 inst->operands[2].type = AARCH64_OPND_NIL;
2204 /* When <imms> >= <immr>, the instruction written:
2205 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2207 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
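/* Worked example: SBFM X0, X1, #4, #11 (imms >= immr) is shown as
   SBFX X0, X1, #4, #8, since lsb = immr = 4 and
   width = imms + 1 - lsb = 11 + 1 - 4 = 8.  */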
2210 convert_bfm_to_bfx (aarch64_inst *inst)
2214 immr = inst->operands[2].imm.value;
2215 imms = inst->operands[3].imm.value;
2219 inst->operands[2].imm.value = lsb;
2220 inst->operands[3].imm.value = imms + 1 - lsb;
2221 /* The two opcodes have different qualifiers for
2222 the immediate operands; reset to help the checking. */
2223 reset_operand_qualifier (inst, 2);
2224 reset_operand_qualifier (inst, 3);
2231 /* When <imms> < <immr>, the instruction written:
2232 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2234 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
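/* Worked example: SBFM X0, X1, #60, #7 (imms < immr) is shown as
   SBFIZ X0, X1, #4, #8, since lsb = (64 - immr) & 0x3f = 4 and
   width = imms + 1 = 8.  */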
2237 convert_bfm_to_bfi (aarch64_inst *inst)
2239 int64_t immr, imms, val;
2241 immr = inst->operands[2].imm.value;
2242 imms = inst->operands[3].imm.value;
2243 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2246 inst->operands[2].imm.value = (val - immr) & (val - 1);
2247 inst->operands[3].imm.value = imms + 1;
2248 /* The two opcodes have different qualifiers for
2249 the immediate operands; reset to help the checking. */
2250 reset_operand_qualifier (inst, 2);
2251 reset_operand_qualifier (inst, 3);
2258 /* The instruction written:
2259 BFC <Xd>, #<lsb>, #<width>
2261 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
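/* Worked example: BFM X0, XZR, #60, #7 is shown as BFC X0, #4, #8,
   using the same immediate arithmetic as SBFIZ above once the XZR
   source operand has been dropped.  */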
2264 convert_bfm_to_bfc (aarch64_inst *inst)
2266 int64_t immr, imms, val;
2268 /* Should have been assured by the base opcode value. */
2269 assert (inst->operands[1].reg.regno == 0x1f);
2271 immr = inst->operands[2].imm.value;
2272 imms = inst->operands[3].imm.value;
2273 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2276 /* Drop XZR from the second operand. */
2277 copy_operand_info (inst, 1, 2);
2278 copy_operand_info (inst, 2, 3);
2279 inst->operands[3].type = AARCH64_OPND_NIL;
2281 /* Recalculate the immediates. */
2282 inst->operands[1].imm.value = (val - immr) & (val - 1);
2283 inst->operands[2].imm.value = imms + 1;
2285 /* The two opcodes have different qualifiers for the operands; reset to
2286 help the checking. */
2287 reset_operand_qualifier (inst, 1);
2288 reset_operand_qualifier (inst, 2);
2289 reset_operand_qualifier (inst, 3);
2297 /* The instruction written:
2298 LSL <Xd>, <Xn>, #<shift>
2300 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
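/* Worked example: UBFM X0, X1, #60, #59 satisfies immr == imms + 1 and is
   shown as LSL X0, X1, #4, since shift = 63 - imms = 4 (or 31 - imms for
   the 32-bit variant).  */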
2303 convert_ubfm_to_lsl (aarch64_inst *inst)
2305 int64_t immr = inst->operands[2].imm.value;
2306 int64_t imms = inst->operands[3].imm.value;
2308 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2310 if ((immr == 0 && imms == val) || immr == imms + 1)
2312 inst->operands[3].type = AARCH64_OPND_NIL;
2313 inst->operands[2].imm.value = val - imms;
2320 /* CINC <Wd>, <Wn>, <cond>
2322 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2323 where <cond> is not AL or NV. */
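/* For example, CSINC W0, W1, W1, NE is shown as CINC W0, W1, EQ; an AL or
   NV condition is excluded, so CSINC W0, W1, W1, AL keeps its machine
   mnemonic.  */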
2326 convert_from_csel (aarch64_inst *inst)
2328 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2329 && (inst->operands[3].cond->value & 0xe) != 0xe)
2331 copy_operand_info (inst, 2, 3);
2332 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2333 inst->operands[3].type = AARCH64_OPND_NIL;
2339 /* CSET <Wd>, <cond>
2341 CSINC <Wd>, WZR, WZR, invert(<cond>)
2342 where <cond> is not AL or NV. */
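/* For example, CSINC W0, WZR, WZR, NE is shown as CSET W0, EQ, the
   condition again being inverted in the alias form.  */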
2345 convert_csinc_to_cset (aarch64_inst *inst)
2347 if (inst->operands[1].reg.regno == 0x1f
2348 && inst->operands[2].reg.regno == 0x1f
2349 && (inst->operands[3].cond->value & 0xe) != 0xe)
2351 copy_operand_info (inst, 1, 3);
2352 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2353 inst->operands[3].type = AARCH64_OPND_NIL;
2354 inst->operands[2].type = AARCH64_OPND_NIL;
2362 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2364 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2365 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2366 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2367 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2368 machine-instruction mnemonic must be used. */
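/* For example, MOVZ X0, #0x2a is shown as MOV X0, #0x2a, while
   MOVZ X0, #0x0, LSL #16 keeps its machine mnemonic (a #0 immediate with a
   non-zero shift), as does e.g. MOVN W0, #0xffff, LSL #16, whose inverted
   value #0xffff could equally be encoded by MOVZ.  */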
2371 convert_movewide_to_mov (aarch64_inst *inst)
2373 uint64_t value = inst->operands[1].imm.value;
2374 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2375 if (value == 0 && inst->operands[1].shifter.amount != 0)
2377 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2378 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2379 value <<= inst->operands[1].shifter.amount;
2380 /* As an alias converter, bear in mind that INST->OPCODE
2381 is the opcode of the real instruction. */
2382 if (inst->opcode->op == OP_MOVN)
2384 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2386 /* A MOVN has an immediate that could be encoded by MOVZ. */
2387 if (aarch64_wide_constant_p (value, is32, NULL))
2390 inst->operands[1].imm.value = value;
2391 inst->operands[1].shifter.amount = 0;
2397 ORR <Wd>, WZR, #<imm>.
2399 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2400 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2401 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2402 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2403 machine-instruction mnemonic must be used. */
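/* For example, ORR W0, WZR, #0xff00ff is shown as MOV W0, #0xff00ff, whereas
   ORR X0, XZR, #0xffff keeps its machine mnemonic because #0xffff could be
   loaded with a single MOVZ.  */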
2406 convert_movebitmask_to_mov (aarch64_inst *inst)
2411 /* Should have been assured by the base opcode value. */
2412 assert (inst->operands[1].reg.regno == 0x1f);
2413 copy_operand_info (inst, 1, 2);
2414 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2415 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2416 value = inst->operands[1].imm.value;
2417 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2419 if (inst->operands[0].reg.regno != 0x1f
2420 && (aarch64_wide_constant_p (value, is32, NULL)
2421 || aarch64_wide_constant_p (~value, is32, NULL)))
2424 inst->operands[2].type = AARCH64_OPND_NIL;
2428 /* Some alias opcodes are disassembled by being converted from their real form.
2429 N.B. INST->OPCODE is the real opcode rather than the alias. */
2432 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2438 return convert_bfm_to_sr (inst);
2440 return convert_ubfm_to_lsl (inst);
2444 return convert_from_csel (inst);
2447 return convert_csinc_to_cset (inst);
2451 return convert_bfm_to_bfx (inst);
2455 return convert_bfm_to_bfi (inst);
2457 return convert_bfm_to_bfc (inst);
2459 return convert_orr_to_mov (inst);
2460 case OP_MOV_IMM_WIDE:
2461 case OP_MOV_IMM_WIDEN:
2462 return convert_movewide_to_mov (inst);
2463 case OP_MOV_IMM_LOG:
2464 return convert_movebitmask_to_mov (inst);
2466 return convert_extr_to_ror (inst);
2471 return convert_shll_to_xtl (inst);
2477 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2478 aarch64_inst *, int);
2480 /* Given the instruction information in *INST, check if the instruction has
2481 any alias form that can be used to represent *INST. If the answer is yes,
2482 update *INST to be in the form of the determined alias. */
2484 /* In the opcode description table, the following flags are used in opcode
2485 entries to help establish the relations between the real and alias opcodes:
2487 F_ALIAS: opcode is an alias
2488 F_HAS_ALIAS: opcode has alias(es)
2491 F_P3: Disassembly preference priority 1-3 (the larger, the
2492 higher). If nothing is specified, the priority defaults
2493 to 0, i.e. the lowest priority.
2495 Although the relation between the machine and the alias instructions is not
2496 explicitly described, it can be easily determined from the base opcode
2497 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2498 description entries:
2500 The mask of an alias opcode must be equal to or a superset of (i.e. more
2501 constrained than) that of the aliased opcode; the same applies to the base opcode value.
2503 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2504 && (opcode->mask & real->mask) == real->mask
2505 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2506 then OPCODE is an alias of, and only of, the REAL instruction
2508 The alias relationship is forced to be flat-structured to keep the related
2509 algorithm simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2511 During the disassembling, the decoding decision tree (in
2512 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2513 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2514 not specified), the disassembler will check whether any alias
2515 instruction exists for this real instruction. If there is, the disassembler
2516 will try to disassemble the 32-bit binary again using the alias's rule, or
2517 try to convert the IR to the form of the alias. In the case of multiple
2518 aliases, the aliases are tried one by one from the highest priority
2519 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2520 first one that succeeds is adopted.
2522 You may ask why there is a need for the conversion of IR from one form to
2523 another in handling certain aliases. This is because, on the one hand, it avoids
2524 adding more operand code to handle unusual encoding/decoding; on the other
2525 hand, during the disassembling, the conversion is an effective approach to
2526 check the condition of an alias (as an alias may be adopted only if certain
2527 conditions are met).
2529 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2530 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2531 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
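/* As a concrete illustration of the flow described above: the vector
   ORR <Vd>.<T>, <Vn>.<T>, <Vm>.<T> has a MOV alias that is flagged with
   F_CONV (see convert_orr_to_mov above); when the decoded instruction has
   Rn == Rm the conversion succeeds and the instruction is printed as MOV,
   otherwise the machine mnemonic ORR is kept.  */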
2534 determine_disassembling_preference (struct aarch64_inst *inst)
2536 const aarch64_opcode *opcode;
2537 const aarch64_opcode *alias;
2539 opcode = inst->opcode;
2541 /* This opcode does not have an alias, so use itself. */
2542 if (!opcode_has_alias (opcode))
2545 alias = aarch64_find_alias_opcode (opcode);
2548 #ifdef DEBUG_AARCH64
2551 const aarch64_opcode *tmp = alias;
2552 printf ("#### LIST ordered: ");
2555 printf ("%s, ", tmp->name);
2556 tmp = aarch64_find_next_alias_opcode (tmp);
2560 #endif /* DEBUG_AARCH64 */
2562 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2564 DEBUG_TRACE ("try %s", alias->name);
2565 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2567 /* An alias can be a pseudo opcode which will never be used in the
2568 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2570 if (pseudo_opcode_p (alias))
2572 DEBUG_TRACE ("skip pseudo %s", alias->name);
2576 if ((inst->value & alias->mask) != alias->opcode)
2578 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2581 /* No need to do any complicated transformation on operands, if the alias
2582 opcode does not have any operand. */
2583 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2585 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2586 aarch64_replace_opcode (inst, alias);
2589 if (alias->flags & F_CONV)
2592 memcpy (©, inst, sizeof (aarch64_inst));
2593 /* ALIAS is the preference as long as the instruction can be
2594 successfully converted to the form of ALIAS. */
2595 if (convert_to_alias (©, alias) == 1)
2597 aarch64_replace_opcode (©, alias);
2598 assert (aarch64_match_operands_constraint (©, NULL));
2599 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2600 memcpy (inst, ©, sizeof (aarch64_inst));
2606 /* Directly decode the alias opcode. */
2608 memset (&temp, '\0', sizeof (aarch64_inst));
2609 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2611 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2612 memcpy (inst, &temp, sizeof (aarch64_inst));
2619 /* Some instructions (including all SVE ones) use the instruction class
2620 to describe how a qualifiers_list index is represented in the instruction
2621 encoding. If INST is such an instruction, decode the appropriate fields
2622 and fill in the operand qualifiers accordingly. Return true if no
2623 problems are found. */
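/* For example, an opcode in the sve_size_bhsd class encodes its variant
   directly in the two-bit 'size' field, so the extracted value 0..3 simply
   selects the row of qualifiers_list holding the byte, halfword, word or
   doubleword element qualifiers.  */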
2626 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2631 switch (inst->opcode->iclass)
2634 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2638 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2641 while ((i & 1) == 0)
2649 /* Pick the smallest applicable element size. */
2650 if ((inst->value & 0x20600) == 0x600)
2652 else if ((inst->value & 0x20400) == 0x400)
2654 else if ((inst->value & 0x20000) == 0)
2661 /* sve_misc instructions have only a single variant. */
2665 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2669 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2672 case sve_shift_pred:
2673 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2684 case sve_shift_unpred:
2685 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2689 variant = extract_field (FLD_size, inst->value, 0);
2695 variant = extract_field (FLD_size, inst->value, 0);
2699 i = extract_field (FLD_size, inst->value, 0);
2706 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2710 /* No mapping between instruction class and qualifiers. */
2714 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2715 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2718 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2719 fails, which means that CODE is not an instruction of OPCODE; otherwise
2722 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2723 determined and used to disassemble CODE; this is done just before the
2727 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2728 aarch64_inst *inst, int noaliases_p)
2732 DEBUG_TRACE ("enter with %s", opcode->name);
2734 assert (opcode && inst);
2736 /* Check the base opcode. */
2737 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2739 DEBUG_TRACE ("base opcode match FAIL");
2744 memset (inst, '\0', sizeof (aarch64_inst));
2746 inst->opcode = opcode;
2749 /* Assign operand codes and indexes. */
2750 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2752 if (opcode->operands[i] == AARCH64_OPND_NIL)
2754 inst->operands[i].type = opcode->operands[i];
2755 inst->operands[i].idx = i;
2758 /* Call the opcode decoder indicated by flags. */
2759 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2761 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2765 /* Possibly use the instruction class to determine the correct
2767 if (!aarch64_decode_variant_using_iclass (inst))
2769 DEBUG_TRACE ("iclass-based decoder FAIL");
2773 /* Call operand decoders. */
2774 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2776 const aarch64_operand *opnd;
2777 enum aarch64_opnd type;
2779 type = opcode->operands[i];
2780 if (type == AARCH64_OPND_NIL)
2782 opnd = &aarch64_operands[type];
2783 if (operand_has_extractor (opnd)
2784 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2786 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2791 /* If the opcode has a verifier, then check it now. */
2792 if (opcode->verifier && ! opcode->verifier (opcode, code))
2794 DEBUG_TRACE ("operand verifier FAIL");
2798 /* Match the qualifiers. */
2799 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2801 /* By this point, CODE has been determined to be a valid instruction
2802 of OPCODE and *INST has been filled with the information of this
2803 instruction. Before returning, check if the instruction has any
2804 alias and should be disassembled in the form of its alias instead.
2805 If the answer is yes, *INST will be updated. */
2807 determine_disassembling_preference (inst);
2808 DEBUG_TRACE ("SUCCESS");
2813 DEBUG_TRACE ("constraint matching FAIL");
2820 /* This does some user-friendly fix-up to *INST. It currently focuses on
2821 the adjustment of qualifiers to help the printed instruction be
2822 recognized/understood more easily. */
2825 user_friendly_fixup (aarch64_inst *inst)
2827 switch (inst->opcode->iclass)
2830 /* TBNZ Xn|Wn, #uimm6, label
2831 Test and Branch Not Zero: conditionally jumps to label if bit number
2832 uimm6 in register Xn is not zero. The bit number implies the width of
2833 the register, which may be written as, and should be disassembled as, Wn
2834 if uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB. */
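/* For example, the encoding of TBNZ X3, #7, label tests bit 7, which fits in
   a W register, so it is printed as TBNZ W3, #7, label.  */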
2836 if (inst->operands[1].imm.value < 32)
2837 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2843 /* Decode INSN and fill in *INST with the instruction information. An alias
2844 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2848 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2849 bfd_boolean noaliases_p)
2851 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2853 #ifdef DEBUG_AARCH64
2856 const aarch64_opcode *tmp = opcode;
2858 DEBUG_TRACE ("opcode lookup:");
2861 aarch64_verbose (" %s", tmp->name);
2862 tmp = aarch64_find_next_opcode (tmp);
2865 #endif /* DEBUG_AARCH64 */
2867 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2868 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2869 opcode field and value, apart from the difference that one of them has an
2870 extra field as part of the opcode, but such a field is used for operand
2871 encoding in other opcode(s) ('immh' in the case of the example). */
2872 while (opcode != NULL)
2874 /* But only one opcode can be decoded successfully, as the
2875 decoding routine will check the constraints carefully. */
2876 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2878 opcode = aarch64_find_next_opcode (opcode);
2884 /* Print operands. */
2887 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2888 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2890 int i, pcrel_p, num_printed;
2891 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2894 /* We regard the opcode operand info as the primary reference; however, we
2895 also look into inst->operands to support the disassembling of optional
2897 operands. The two operand codes should be the same in all cases, apart
2898 from when the operand can be optional.
2899 if (opcode->operands[i] == AARCH64_OPND_NIL
2900 || opnds[i].type == AARCH64_OPND_NIL)
2903 /* Generate the operand string in STR. */
2904 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2907 /* Print the delimiter (taking account of omitted operand(s)). */
2909 (*info->fprintf_func) (info->stream, "%s",
2910 num_printed++ == 0 ? "\t" : ", ");
2912 /* Print the operand. */
2914 (*info->print_address_func) (info->target, info);
2916 (*info->fprintf_func) (info->stream, "%s", str);
2920 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2923 remove_dot_suffix (char *name, const aarch64_inst *inst)
2928 ptr = strchr (inst->opcode->name, '.');
2929 assert (ptr && inst->cond);
2930 len = ptr - inst->opcode->name;
2932 strncpy (name, inst->opcode->name, len);
2936 /* Print the instruction mnemonic name. */
2939 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2941 if (inst->opcode->flags & F_COND)
2943 /* For instructions that are truly conditionally executed, e.g. b.cond,
2944 prepare the full mnemonic name with the corresponding condition
2948 remove_dot_suffix (name, inst);
2949 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2952 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2955 /* Decide whether we need to print a comment after the operands of
2956 instruction INST. */
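/* For example, the CS condition is also named HS, so a "b.cs" instruction is
   printed with the trailing comment " // b.hs"; conditions with a single
   name produce no comment.  */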
2959 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2961 if (inst->opcode->flags & F_COND)
2964 unsigned int i, num_conds;
2966 remove_dot_suffix (name, inst);
2967 num_conds = ARRAY_SIZE (inst->cond->names);
2968 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2969 (*info->fprintf_func) (info->stream, "%s %s.%s",
2970 i == 1 ? " //" : ",",
2971 name, inst->cond->names[i]);
2975 /* Print the instruction according to *INST. */
2978 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2979 struct disassemble_info *info)
2981 print_mnemonic_name (inst, info);
2982 print_operands (pc, inst->opcode, inst->operands, info);
2983 print_comment (inst, info);
2986 /* Entry-point of the instruction disassembler and printer. */
2989 print_insn_aarch64_word (bfd_vma pc,
2991 struct disassemble_info *info)
2993 static const char *err_msg[6] =
2996 [-ERR_UND] = "undefined",
2997 [-ERR_UNP] = "unpredictable",
3004 info->insn_info_valid = 1;
3005 info->branch_delay_insns = 0;
3006 info->data_size = 0;
3010 if (info->flags & INSN_HAS_RELOC)
3011 /* If the instruction has a reloc associated with it, then
3012 the offset field in the instruction will actually be the
3013 addend for the reloc. (If we are using REL type relocs).
3014 In such cases, we can ignore the pc when computing
3015 addresses, since the addend is not currently pc-relative. */
3018 ret = aarch64_decode_insn (word, &inst, no_aliases);
3020 if (((word >> 21) & 0x3ff) == 1)
3022 /* RESERVED for ALES. */
3023 assert (ret != ERR_OK);
3032 /* Handle undefined instructions. */
3033 info->insn_type = dis_noninsn;
3034 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3035 word, err_msg[-ret]);
3038 user_friendly_fixup (&inst);
3039 print_aarch64_insn (pc, &inst, info);
3046 /* Prevent mapping symbols ($x, $d, etc.) from
3047 being displayed in symbol-relative addresses. */
3050 aarch64_symbol_is_valid (asymbol * sym,
3051 struct disassemble_info * info ATTRIBUTE_UNUSED)
3058 name = bfd_asymbol_name (sym);
3062 || (name[1] != 'x' && name[1] != 'd')
3063 || (name[2] != '\0' && name[2] != '.'));
3066 /* Print data bytes on INFO->STREAM. */
3069 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3071 struct disassemble_info *info)
3073 switch (info->bytes_per_chunk)
3076 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3079 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3082 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3089 /* Try to infer the code or data type from a symbol.
3090 Returns nonzero if *MAP_TYPE was set. */
3093 get_sym_code_type (struct disassemble_info *info, int n,
3094 enum map_type *map_type)
3096 elf_symbol_type *es;
3100 es = *(elf_symbol_type **)(info->symtab + n);
3101 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3103 /* If the symbol has function type then use that. */
3104 if (type == STT_FUNC)
3106 *map_type = MAP_INSN;
3110 /* Check for mapping symbols. */
3111 name = bfd_asymbol_name(info->symtab[n]);
3113 && (name[1] == 'x' || name[1] == 'd')
3114 && (name[2] == '\0' || name[2] == '.'))
3116 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3123 /* Entry-point of the AArch64 disassembler. */
3126 print_insn_aarch64 (bfd_vma pc,
3127 struct disassemble_info *info)
3129 bfd_byte buffer[INSNLEN];
3131 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3132 bfd_boolean found = FALSE;
3133 unsigned int size = 4;
3136 if (info->disassembler_options)
3138 set_default_aarch64_dis_options (info);
3140 parse_aarch64_dis_options (info->disassembler_options);
3142 /* To avoid repeated parsing of these options, we remove them here. */
3143 info->disassembler_options = NULL;
3146 /* AArch64 instructions are always little-endian.  */
3147 info->endian_code = BFD_ENDIAN_LITTLE;
3149 /* First check the full symtab for a mapping symbol, even if there
3150 are no usable non-mapping symbols for this address. */
3151 if (info->symtab_size != 0
3152 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3154 enum map_type type = MAP_INSN;
3159 if (pc <= last_mapping_addr)
3160 last_mapping_sym = -1;
3162 /* Start scanning at the start of the function, or wherever
3163 we finished last time. */
3164 n = info->symtab_pos + 1;
3165 if (n < last_mapping_sym)
3166 n = last_mapping_sym;
3168 /* Scan up to the location being disassembled. */
3169 for (; n < info->symtab_size; n++)
3171 addr = bfd_asymbol_value (info->symtab[n]);
3174 if ((info->section == NULL
3175 || info->section == info->symtab[n]->section)
3176 && get_sym_code_type (info, n, &type))
3185 n = info->symtab_pos;
3186 if (n < last_mapping_sym)
3187 n = last_mapping_sym;
3189 /* No mapping symbol found at this address. Look backwards
3190 for a preceding one. */
3193 if (get_sym_code_type (info, n, &type))
3202 last_mapping_sym = last_sym;
3205 /* Look a little bit ahead to see if we should print out
3206 less than four bytes of data. If there's a symbol,
3207 mapping or otherwise, after two bytes then don't print more.  */
3209 if (last_type == MAP_DATA)
3211 size = 4 - (pc & 3);
3212 for (n = last_sym + 1; n < info->symtab_size; n++)
3214 addr = bfd_asymbol_value (info->symtab[n]);
3217 if (addr - pc < size)
3222 /* If the next symbol is after three bytes, we need to
3223 print only part of the data, so that we can use either
3226 size = (pc & 1) ? 1 : 2;
3230 if (last_type == MAP_DATA)
3232 /* size was set above. */
3233 info->bytes_per_chunk = size;
3234 info->display_endian = info->endian;
3235 printer = print_insn_data;
3239 info->bytes_per_chunk = size = INSNLEN;
3240 info->display_endian = info->endian_code;
3241 printer = print_insn_aarch64_word;
3244 status = (*info->read_memory_func) (pc, buffer, size, info);
3247 (*info->memory_error_func) (status, pc, info);
3251 data = bfd_get_bits (buffer, size * 8,
3252 info->display_endian == BFD_ENDIAN_BIG);
3254 (*printer) (pc, data, info);
3260 print_aarch64_disassembler_options (FILE *stream)
3262 fprintf (stream, _("\n\
3263 The following AARCH64 specific disassembler options are supported for use\n\
3264 with the -M switch (multiple options should be separated by commas):\n"));
3266 fprintf (stream, _("\n\
3267 no-aliases Don't print instruction aliases.\n"));
3269 fprintf (stream, _("\n\
3270 aliases Do print instruction aliases.\n"));
3272 #ifdef DEBUG_AARCH64
3273 fprintf (stream, _("\n\
3274 debug_dump Temp switch for debug trace.\n"));
3275 #endif /* DEBUG_AARCH64 */
3277 fprintf (stream, _("\n"));