1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
49 \fstatic int no_notes = 1; /* If set do not print disassemble notes in the
50 output as comments. */
52 /* Currently active instruction sequence. */
53 static aarch64_instr_sequence insn_sequence ATTRIBUTE_UNUSED;
56 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
61 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
63 /* Try to match options that are simple flags */
64 if (CONST_STRNEQ (option, "no-aliases"))
70 if (CONST_STRNEQ (option, "aliases"))
76 if (CONST_STRNEQ (option, "no-notes"))
82 if (CONST_STRNEQ (option, "notes"))
89 if (CONST_STRNEQ (option, "debug_dump"))
94 #endif /* DEBUG_AARCH64 */
97 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
101 parse_aarch64_dis_options (const char *options)
103 const char *option_end;
108 while (*options != '\0')
110 /* Skip empty options. */
117 /* We know that *options is neither NUL or a comma. */
118 option_end = options + 1;
119 while (*option_end != ',' && *option_end != '\0')
122 parse_aarch64_dis_option (options, option_end - options);
124 /* Go on to the next one. If option_end points to a comma, it
125 will be skipped above. */
126 options = option_end;
130 /* Functions doing the instruction disassembling. */
132 /* The unnamed arguments consist of the number of fields and information about
133 these fields where the VALUE will be extracted from CODE and returned.
134 MASK can be zero or the base mask of the opcode.
136 N.B. the fields are required to be in such an order than the most signficant
137 field for VALUE comes the first, e.g. the <index> in
138 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
139 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
140 the order of H, L, M. */
143 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
146 const aarch64_field *field;
147 enum aarch64_field_kind kind;
151 num = va_arg (va, uint32_t);
153 aarch64_insn value = 0x0;
156 kind = va_arg (va, enum aarch64_field_kind);
157 field = &fields[kind];
158 value <<= field->width;
159 value |= extract_field (kind, code, mask);
164 /* Extract the value of all fields in SELF->fields from instruction CODE.
165 The least significant bit comes from the final field. */
168 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
172 enum aarch64_field_kind kind;
175 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
177 kind = self->fields[i];
178 value <<= fields[kind].width;
179 value |= extract_field (kind, code, 0);
184 /* Sign-extend bit I of VALUE. */
185 static inline int32_t
186 sign_extend (aarch64_insn value, unsigned i)
188 uint32_t ret = value;
191 if ((value >> i) & 0x1)
193 uint32_t val = (uint32_t)(-1) << i;
196 return (int32_t) ret;
199 /* N.B. the following inline helpfer functions create a dependency on the
200 order of operand qualifier enumerators. */
202 /* Given VALUE, return qualifier for a general purpose register. */
203 static inline enum aarch64_opnd_qualifier
204 get_greg_qualifier_from_value (aarch64_insn value)
206 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
208 && aarch64_get_qualifier_standard_value (qualifier) == value);
212 /* Given VALUE, return qualifier for a vector register. This does not support
213 decoding instructions that accept the 2H vector type. */
215 static inline enum aarch64_opnd_qualifier
216 get_vreg_qualifier_from_value (aarch64_insn value)
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
220 /* Instructions using vector type 2H should not call this function. Skip over
222 if (qualifier >= AARCH64_OPND_QLF_V_2H)
226 && aarch64_get_qualifier_standard_value (qualifier) == value);
230 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
231 static inline enum aarch64_opnd_qualifier
232 get_sreg_qualifier_from_value (aarch64_insn value)
234 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
237 && aarch64_get_qualifier_standard_value (qualifier) == value);
241 /* Given the instruction in *INST which is probably half way through the
242 decoding and our caller wants to know the expected qualifier for operand
243 I. Return such a qualifier if we can establish it; otherwise return
244 AARCH64_OPND_QLF_NIL. */
246 static aarch64_opnd_qualifier_t
247 get_expected_qualifier (const aarch64_inst *inst, int i)
249 aarch64_opnd_qualifier_seq_t qualifiers;
250 /* Should not be called if the qualifier is known. */
251 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
252 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
254 return qualifiers[i];
256 return AARCH64_OPND_QLF_NIL;
259 /* Operand extractors. */
262 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
263 const aarch64_insn code,
264 const aarch64_inst *inst ATTRIBUTE_UNUSED,
265 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
267 info->reg.regno = extract_field (self->fields[0], code, 0);
272 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
273 const aarch64_insn code ATTRIBUTE_UNUSED,
274 const aarch64_inst *inst ATTRIBUTE_UNUSED,
275 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
277 assert (info->idx == 1
279 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
283 /* e.g. IC <ic_op>{, <Xt>}. */
285 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED,
288 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
290 info->reg.regno = extract_field (self->fields[0], code, 0);
291 assert (info->idx == 1
292 && (aarch64_get_operand_class (inst->operands[0].type)
293 == AARCH64_OPND_CLASS_SYSTEM));
294 /* This will make the constraint checking happy and more importantly will
295 help the disassembler determine whether this operand is optional or
297 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
/* NOTE(review): this listing is truncated by extraction -- interior lines
   (braces, declarations such as `shift'/`pos', break statements and the
   final return) are missing and the original file's line numbers are fused
   into the text.  Code left byte-identical; restore from upstream
   opcodes/aarch64-dis.c before compiling.  */
302 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
304 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
305 const aarch64_insn code,
306 const aarch64_inst *inst ATTRIBUTE_UNUSED,
307 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
310 info->reglane.regno = extract_field (self->fields[0], code,
/* Branch on instruction class: scalar-one-element / vector-insert forms.  */
313 /* Index and/or type. */
314 if (inst->opcode->iclass == asisdone
315 || inst->opcode->iclass == asimdins)
317 if (info->type == AARCH64_OPND_En
318 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
321 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
322 assert (info->idx == 1); /* Vn */
323 aarch64_insn value = extract_field (FLD_imm4, code, 0);
324 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
325 info->qualifier = get_expected_qualifier (inst, info->idx);
326 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
327 info->reglane.index = value >> shift;
331 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
/* imm5 encodes element size as the position of the lowest set bit;
   presumably `pos' starts at -1 above -- missing from this view.  */
339 aarch64_insn value = extract_field (FLD_imm5, code, 0);
340 while (++pos <= 3 && (value & 0x1) == 0)
344 info->qualifier = get_sreg_qualifier_from_value (pos);
345 info->reglane.index = (unsigned) (value >> 1);
348 else if (inst->opcode->iclass == dotproduct)
350 /* Need information in other operand(s) to help decoding. */
351 info->qualifier = get_expected_qualifier (inst, info->idx);
352 switch (info->qualifier)
354 case AARCH64_OPND_QLF_S_4B:
356 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
357 info->reglane.regno &= 0x1f;
363 else if (inst->opcode->iclass == cryptosm3)
365 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
366 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
370 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
371 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
373 /* Need information in other operand(s) to help decoding. */
374 info->qualifier = get_expected_qualifier (inst, info->idx);
375 switch (info->qualifier)
377 case AARCH64_OPND_QLF_S_H:
378 if (info->type == AARCH64_OPND_Em16)
/* Em16: register number restricted to V0-V15, index in H:L:M.  */
381 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
383 info->reglane.regno &= 0xf;
388 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
391 case AARCH64_OPND_QLF_S_S:
393 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
395 case AARCH64_OPND_QLF_S_D:
397 info->reglane.index = extract_field (FLD_H, code, 0);
/* FCMLA by-element: a complex pair occupies two elements, so the raw
   index must be even and is halved.  */
403 if (inst->opcode->op == OP_FCMLA_ELEM
404 && info->qualifier != AARCH64_OPND_QLF_S_H)
406 /* Complex operand takes two elements. */
407 if (info->reglane.index & 1)
409 info->reglane.index /= 2;
417 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
418 const aarch64_insn code,
419 const aarch64_inst *inst ATTRIBUTE_UNUSED,
420 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
423 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
425 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
/* NOTE(review): truncated by extraction -- the local `data[]' lookup table
   (mapping FLD_opcode values to num_elements/is_reserved/num_regs), the
   enclosing struct declaration, braces and returns are missing, and
   original line numbers are fused into the text.  Code left byte-identical;
   restore from upstream before compiling.  */
429 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
431 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
432 aarch64_opnd_info *info, const aarch64_insn code,
433 const aarch64_inst *inst,
434 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
437 /* Number of elements in each structure to be loaded/stored. */
438 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
442 unsigned is_reserved;
444 unsigned num_elements;
460 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
462 value = extract_field (FLD_opcode, code, 0);
463 /* PR 21595: Check for a bogus value. */
464 if (value >= ARRAY_SIZE (data))
/* Reject encodings whose element count doesn't match the opcode, or that
   are architecturally reserved.  */
466 if (expected_num != data[value].num_elements || data[value].is_reserved)
468 info->reglist.num_regs = data[value].num_regs;
473 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
474 lanes instructions. */
476 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
477 aarch64_opnd_info *info, const aarch64_insn code,
478 const aarch64_inst *inst,
479 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
484 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
486 value = extract_field (FLD_S, code, 0);
488 /* Number of registers is equal to the number of elements in
489 each structure to be loaded/stored. */
490 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
491 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
493 /* Except when it is LD1R. */
494 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
495 info->reglist.num_regs = 2;
/* NOTE(review): truncated by extraction -- the switch on opcode<2:1>,
   braces, break statements, `return FALSE' paths for reserved encodings
   and the final return are missing; original line numbers are fused into
   the text.  Code left byte-identical; restore from upstream before
   compiling.  */
500 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
501 load/store single element instructions. */
503 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
504 aarch64_opnd_info *info, const aarch64_insn code,
505 const aarch64_inst *inst ATTRIBUTE_UNUSED,
506 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
508 aarch64_field field = {0, 0};
509 aarch64_insn QSsize; /* fields Q:S:size. */
510 aarch64_insn opcodeh2; /* opcode<2:1> */
513 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
515 /* Decode the index, opcode<2:1> and size. */
516 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
517 opcodeh2 = extract_field_2 (&field, code, 0);
518 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
/* opcodeh2 selects element size: byte / half / word-or-double.  */
522 info->qualifier = AARCH64_OPND_QLF_S_B;
523 /* Index encoded in "Q:S:size". */
524 info->reglist.index = QSsize;
530 info->qualifier = AARCH64_OPND_QLF_S_H;
531 /* Index encoded in "Q:S:size<1>". */
532 info->reglist.index = QSsize >> 1;
/* size<1> or S set where architecturally reserved -- rejection paths
   presumably follow these tests (missing from this view).  */
535 if ((QSsize >> 1) & 0x1)
538 if ((QSsize & 0x1) == 0)
540 info->qualifier = AARCH64_OPND_QLF_S_S;
541 /* Index encoded in "Q:S". */
542 info->reglist.index = QSsize >> 2;
546 if (extract_field (FLD_S, code, 0))
549 info->qualifier = AARCH64_OPND_QLF_S_D;
550 /* Index encoded in "Q". */
551 info->reglist.index = QSsize >> 3;
558 info->reglist.has_index = 1;
559 info->reglist.num_regs = 0;
560 /* Number of registers is equal to the number of elements in
561 each structure to be loaded/stored. */
562 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
563 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
/* NOTE(review): truncated by extraction -- declarations of `pos', braces,
   the `immh == 0' rejection, the Q-based qualifier selection and the final
   return are missing; original line numbers are fused into the text.
   Code left byte-identical; restore from upstream before compiling.  */
568 /* Decode fields immh:immb and/or Q for e.g.
569 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
570 or SSHR <V><d>, <V><n>, #<shift>. */
573 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
574 aarch64_opnd_info *info, const aarch64_insn code,
575 const aarch64_inst *inst,
576 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
579 aarch64_insn Q, imm, immh;
580 enum aarch64_insn_class iclass = inst->opcode->iclass;
582 immh = extract_field (FLD_immh, code, 0);
585 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
587 /* Get highest set bit in immh. */
/* `pos' presumably starts at 4 -- declaration missing from this view.  */
588 while (--pos >= 0 && (immh & 0x8) == 0)
591 assert ((iclass == asimdshf || iclass == asisdshf)
592 && (info->type == AARCH64_OPND_IMM_VLSR
593 || info->type == AARCH64_OPND_IMM_VLSL));
595 if (iclass == asimdshf)
597 Q = extract_field (FLD_Q, code, 0)
599 0000 x SEE AdvSIMD modified immediate
609 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
612 info->qualifier = get_sreg_qualifier_from_value (pos);
/* Right shifts encode (element_size*2 - imm); left shifts (imm - size).  */
614 if (info->type == AARCH64_OPND_IMM_VLSR)
616 0000 SEE AdvSIMD modified immediate
617 0001 (16-UInt(immh:immb))
618 001x (32-UInt(immh:immb))
619 01xx (64-UInt(immh:immb))
620 1xxx (128-UInt(immh:immb)) */
621 info->imm.value = (16 << pos) - imm;
625 0000 SEE AdvSIMD modified immediate
626 0001 (UInt(immh:immb)-8)
627 001x (UInt(immh:immb)-16)
628 01xx (UInt(immh:immb)-32)
629 1xxx (UInt(immh:immb)-64) */
630 info->imm.value = imm - (8 << pos);
635 /* Decode shift immediate for e.g. sshr (imm). */
637 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
638 aarch64_opnd_info *info, const aarch64_insn code,
639 const aarch64_inst *inst ATTRIBUTE_UNUSED,
640 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
644 val = extract_field (FLD_size, code, 0);
647 case 0: imm = 8; break;
648 case 1: imm = 16; break;
649 case 2: imm = 32; break;
650 default: return FALSE;
652 info->imm.value = imm;
656 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
657 value in the field(s) will be extracted as unsigned immediate value. */
659 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
660 const aarch64_insn code,
661 const aarch64_inst *inst ATTRIBUTE_UNUSED,
662 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
666 imm = extract_all_fields (self, code);
668 if (operand_need_sign_extension (self))
669 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
671 if (operand_need_shift_by_two (self))
674 if (info->type == AARCH64_OPND_ADDR_ADRP)
677 info->imm.value = imm;
681 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
683 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
684 const aarch64_insn code,
685 const aarch64_inst *inst ATTRIBUTE_UNUSED,
686 aarch64_operand_error *errors)
688 aarch64_ext_imm (self, info, code, inst, errors);
689 info->shifter.kind = AARCH64_MOD_LSL;
690 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
/* NOTE(review): truncated by extraction -- declarations of `imm'/`i',
   braces, break statements, the FMOV (SIMD_FPIMM) branch body, the switch
   default and the final return are missing; original line numbers are
   fused into the text.  Code left byte-identical; restore from upstream
   before compiling.  */
694 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
695 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
697 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
698 aarch64_opnd_info *info,
699 const aarch64_insn code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED,
701 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
704 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
705 aarch64_field field = {0, 0};
707 assert (info->idx == 1);
/* SIMD_FPIMM marks the immediate as floating-point (branch body missing
   from this view).  */
709 if (info->type == AARCH64_OPND_SIMD_FPIMM)
712 /* a:b:c:d:e:f:g:h */
713 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
714 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
716 /* Either MOVI <Dd>, #<imm>
717 or MOVI <Vd>.2D, #<imm>.
718 <imm> is a 64-bit immediate
719 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
720 encoded in "a:b:c:d:e:f:g:h". */
722 unsigned abcdefgh = imm;
/* Expand each of the 8 bits to a full 0x00/0xff byte.  */
723 for (imm = 0ull, i = 0; i < 8; i++)
724 if (((abcdefgh >> i) & 0x1) != 0)
725 imm |= 0xffull << (8 * i);
727 info->imm.value = imm;
730 info->qualifier = get_expected_qualifier (inst, info->idx);
731 switch (info->qualifier)
733 case AARCH64_OPND_QLF_NIL:
735 info->shifter.kind = AARCH64_MOD_NONE;
737 case AARCH64_OPND_QLF_LSL:
739 info->shifter.kind = AARCH64_MOD_LSL;
740 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
742 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
743 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
744 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
745 default: assert (0); return FALSE;
747 /* 00: 0; 01: 8; 10:16; 11:24. */
748 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
750 case AARCH64_OPND_QLF_MSL:
752 info->shifter.kind = AARCH64_MOD_MSL;
753 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
754 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
764 /* Decode an 8-bit floating-point immediate. */
766 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
767 const aarch64_insn code,
768 const aarch64_inst *inst ATTRIBUTE_UNUSED,
769 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
771 info->imm.value = extract_all_fields (self, code);
776 /* Decode a 1-bit rotate immediate (#90 or #270). */
778 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
779 const aarch64_insn code,
780 const aarch64_inst *inst ATTRIBUTE_UNUSED,
781 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
783 uint64_t rot = extract_field (self->fields[0], code, 0);
785 info->imm.value = rot * 180 + 90;
789 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
791 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
792 const aarch64_insn code,
793 const aarch64_inst *inst ATTRIBUTE_UNUSED,
794 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
796 uint64_t rot = extract_field (self->fields[0], code, 0);
798 info->imm.value = rot * 90;
802 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
804 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
805 aarch64_opnd_info *info, const aarch64_insn code,
806 const aarch64_inst *inst ATTRIBUTE_UNUSED,
807 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
809 info->imm.value = 64- extract_field (FLD_scale, code, 0);
813 /* Decode arithmetic immediate for e.g.
814 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
816 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
817 aarch64_opnd_info *info, const aarch64_insn code,
818 const aarch64_inst *inst ATTRIBUTE_UNUSED,
819 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
823 info->shifter.kind = AARCH64_MOD_LSL;
825 value = extract_field (FLD_shift, code, 0);
828 info->shifter.amount = value ? 12 : 0;
829 /* imm12 (unsigned) */
830 info->imm.value = extract_field (FLD_imm12, code, 0);
835 /* Return true if VALUE is a valid logical immediate encoding, storing the
836 decoded value in *RESULT if so. ESIZE is the number of bytes in the
837 decoded immediate. */
839 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
845 /* value is N:immr:imms. */
847 R = (value >> 6) & 0x3f;
848 N = (value >> 12) & 0x1;
850 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
851 (in other words, right rotated by R), then replicated. */
855 mask = 0xffffffffffffffffull;
861 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
862 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
863 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
864 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
865 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
866 default: return FALSE;
868 mask = (1ull << simd_size) - 1;
869 /* Top bits are IGNORED. */
873 if (simd_size > esize * 8)
876 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
877 if (S == simd_size - 1)
879 /* S+1 consecutive bits to 1. */
880 /* NOTE: S can't be 63 due to detection above. */
881 imm = (1ull << (S + 1)) - 1;
882 /* Rotate to the left by simd_size - R. */
884 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
885 /* Replicate the value according to SIMD size. */
888 case 2: imm = (imm << 2) | imm;
890 case 4: imm = (imm << 4) | imm;
892 case 8: imm = (imm << 8) | imm;
894 case 16: imm = (imm << 16) | imm;
896 case 32: imm = (imm << 32) | imm;
899 default: assert (0); return 0;
902 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
907 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
909 aarch64_ext_limm (const aarch64_operand *self,
910 aarch64_opnd_info *info, const aarch64_insn code,
911 const aarch64_inst *inst,
912 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
917 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
919 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
920 return decode_limm (esize, value, &info->imm.value);
923 /* Decode a logical immediate for the BIC alias of AND (etc.). */
925 aarch64_ext_inv_limm (const aarch64_operand *self,
926 aarch64_opnd_info *info, const aarch64_insn code,
927 const aarch64_inst *inst,
928 aarch64_operand_error *errors)
930 if (!aarch64_ext_limm (self, info, code, inst, errors))
932 info->imm.value = ~info->imm.value;
/* NOTE(review): truncated by extraction -- declarations of `value', the
   switch skeleton, braces, the opc1 validity check on the non-pair path
   and the final return are missing; original line numbers are fused into
   the text.  Code left byte-identical; restore from upstream before
   compiling.  */
936 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
937 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
939 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
940 aarch64_opnd_info *info,
941 const aarch64_insn code, const aarch64_inst *inst,
942 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
947 info->reg.regno = extract_field (FLD_Rt, code, 0);
950 value = extract_field (FLD_ldst_size, code, 0);
/* Pair and literal loads use a 2-bit size field (S/D/Q only).  */
951 if (inst->opcode->iclass == ldstpair_indexed
952 || inst->opcode->iclass == ldstnapair_offs
953 || inst->opcode->iclass == ldstpair_off
954 || inst->opcode->iclass == loadlit)
956 enum aarch64_opnd_qualifier qualifier;
959 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
960 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
961 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
962 default: return FALSE;
964 info->qualifier = qualifier;
/* Other classes: size derived from opc1:ldst_size.  */
969 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
972 info->qualifier = get_sreg_qualifier_from_value (value);
978 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
980 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
981 aarch64_opnd_info *info,
983 const aarch64_inst *inst ATTRIBUTE_UNUSED,
984 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
987 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
991 /* Decode the address operand for e.g.
992 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
994 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
995 aarch64_opnd_info *info,
996 aarch64_insn code, const aarch64_inst *inst,
997 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
999 info->qualifier = get_expected_qualifier (inst, info->idx);
1002 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1005 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1006 info->addr.offset.imm = sign_extend (imm, 8);
1007 if (extract_field (self->fields[2], code, 0) == 1) {
1008 info->addr.writeback = 1;
1009 info->addr.preind = 1;
/* NOTE(review): truncated by extraction -- declarations of `size', braces,
   the shift-present (S bit) branch structure and the final return are
   missing; original line numbers are fused into the text.  Code left
   byte-identical; restore from upstream before compiling.  */
1014 /* Decode the address operand for e.g.
1015 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1017 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1018 aarch64_opnd_info *info,
1019 aarch64_insn code, const aarch64_inst *inst,
1020 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1022 aarch64_insn S, value;
1025 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1027 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
/* option field selects the extend/shift modifier.  */
1029 value = extract_field (FLD_option, code, 0);
1030 info->shifter.kind =
1031 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1032 /* Fix-up the shifter kind; although the table-driven approach is
1033 efficient, it is slightly inflexible, thus needing this fix-up. */
1034 if (info->shifter.kind == AARCH64_MOD_UXTX)
1035 info->shifter.kind = AARCH64_MOD_LSL;
1037 S = extract_field (FLD_S, code, 0);
/* S clear: no shift amount.  S set: amount is log2 of the access size.  */
1040 info->shifter.amount = 0;
1041 info->shifter.amount_present = 0;
1046 /* Need information in other operand(s) to help achieve the decoding
1048 info->qualifier = get_expected_qualifier (inst, info->idx);
1049 /* Get the size of the data element that is accessed, which may be
1050 different from that of the source register size, e.g. in strb/ldrb. */
1051 size = aarch64_get_qualifier_esize (info->qualifier);
1052 info->shifter.amount = get_logsz (size);
1053 info->shifter.amount_present = 1;
/* NOTE(review): truncated by extraction -- declaration of `imm', braces,
   the else-structure around the writeback branch and the final return are
   missing; original line numbers are fused into the text.  Code left
   byte-identical; restore from upstream before compiling.  */
1059 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1061 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1062 aarch64_insn code, const aarch64_inst *inst,
1063 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1066 info->qualifier = get_expected_qualifier (inst, info->idx);
1069 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1070 /* simm (imm9 or imm7) */
1071 imm = extract_field (self->fields[0], code, 0);
1072 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1073 if (self->fields[0] == FLD_imm7)
1074 /* scaled immediate in ld/st pair instructions. */
1075 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
/* These classes never write back the base register.  */
1077 if (inst->opcode->iclass == ldst_unscaled
1078 || inst->opcode->iclass == ldstnapair_offs
1079 || inst->opcode->iclass == ldstpair_off
1080 || inst->opcode->iclass == ldst_unpriv)
1081 info->addr.writeback = 0;
1084 /* pre/post- index */
1085 info->addr.writeback = 1;
/* fields[1] distinguishes pre-index (1) from post-index (0).  */
1086 if (extract_field (self->fields[1], code, 0) == 1)
1087 info->addr.preind = 1;
1089 info->addr.postind = 1;
1095 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1097 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1099 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1100 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1103 info->qualifier = get_expected_qualifier (inst, info->idx);
1104 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1106 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1108 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1112 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1114 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1116 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1117 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1121 info->qualifier = get_expected_qualifier (inst, info->idx);
1123 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1125 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1126 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1127 if (extract_field (self->fields[3], code, 0) == 1) {
1128 info->addr.writeback = 1;
1129 info->addr.preind = 1;
/* NOTE(review): truncated by extraction -- braces, the else around the
   Rm != 31 (register offset) case and the final return are missing;
   original line numbers are fused into the text.  Code left byte-identical;
   restore from upstream before compiling.  */
1134 /* Decode the address operand for e.g.
1135 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1137 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1138 aarch64_opnd_info *info,
1139 aarch64_insn code, const aarch64_inst *inst,
1140 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1142 /* The opcode dependent area stores the number of elements in
1143 each structure to be loaded/stored. */
1144 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1147 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1148 /* Rm | #<amount> */
1149 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
/* Rm == 31 selects the immediate post-increment form.  */
1150 if (info->addr.offset.regno == 31)
1152 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1153 /* Special handling of loading single structure to all lane. */
1154 info->addr.offset.imm = (is_ld1r ? 1
1155 : inst->operands[0].reglist.num_regs)
1156 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1158 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1159 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1160 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
/* Otherwise the offset is the register Rm.  */
1163 info->addr.offset.is_reg = 1;
1164 info->addr.writeback = 1;
1169 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1171 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1172 aarch64_opnd_info *info,
1173 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1174 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1178 value = extract_field (FLD_cond, code, 0);
1179 info->cond = get_cond_from_value (value);
/* NOTE(review): truncated by extraction -- the `code' parameter line, the
   tail of the extract_fields argument list (FLD_CRm, FLD_op2), braces, the
   F_SYS_WRITE comparison tail and the final return are missing; original
   line numbers are fused into the text.  Code left byte-identical; restore
   from upstream before compiling.  */
1183 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1185 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1186 aarch64_opnd_info *info,
1188 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1189 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1191 /* op0:op1:CRn:CRm:op2 */
1192 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1194 info->sysreg.flags = 0;
1196 /* If a system instruction, check which restrictions should be on the register
1197 value during decoding, these will be enforced then. */
1198 if (inst->opcode->iclass == ic_system)
1200 /* Check to see if it's read-only, else check if it's write only.
1201 if it's both or unspecified don't care. */
1202 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1203 info->sysreg.flags = F_REG_READ;
1204 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1206 info->sysreg.flags = F_REG_WRITE;
1212 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1214 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1215 aarch64_opnd_info *info, aarch64_insn code,
1216 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1217 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1221 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1222 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1223 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1225 /* Reserved value in <pstatefield>. */
/* NOTE(review): truncated by extraction -- the `code' parameter, `value'
   and `i' declarations, the switch on info->type header, braces, the tail
   of the extract_fields argument list (FLD_CRm, FLD_op2) and the returns
   are missing; original line numbers are fused into the text.  Code left
   byte-identical; restore from upstream before compiling.  */
1229 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1231 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1232 aarch64_opnd_info *info,
1234 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1235 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1239 const aarch64_sys_ins_reg *sysins_ops;
1240 /* op0:op1:CRn:CRm:op2 */
1241 value = extract_fields (code, 0, 5,
1242 FLD_op0, FLD_op1, FLD_CRn,
/* Select the operation table matching the operand type.  */
1247 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1248 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1249 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1250 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1251 default: assert (0); return FALSE;
/* Linear search for the matching encoding.  */
1254 for (i = 0; sysins_ops[i].name != NULL; ++i)
1255 if (sysins_ops[i].value == value)
1257 info->sysins_op = sysins_ops + i;
1258 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1259 info->sysins_op->name,
1260 (unsigned)info->sysins_op->value,
1261 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1268 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1271 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1272 aarch64_opnd_info *info,
1274 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1275 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* CRm directly indexes the barrier-option table. */
1278 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1282 /* Decode the prefetch operation option operand for e.g.
1283 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1286 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1287 aarch64_opnd_info *info,
1288 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1289 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* The Rt field holds the prefetch-operation number, used as an index
   into the prfop table. */
1292 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1296 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1297 to the matching name/value pair in aarch64_hint_options. */
1301 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1302 aarch64_opnd_info *info,
1303 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1304 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1307 unsigned hint_number;
/* The hint number is CRm:op2; look it up in the hint-option table. */
1310 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1312 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1314 if (hint_number == aarch64_hint_options[i].value)
1316 info->hint_option = &(aarch64_hint_options[i]);
1324 /* Decode the extended register operand for e.g.
1325 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1327 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1328 aarch64_opnd_info *info,
1330 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1331 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1336 info->reg.regno = extract_field (FLD_Rm, code, 0);
/* The 3-bit 'option' field selects the extend kind (UXTB..SXTX). */
1338 value = extract_field (FLD_option, code, 0);
1339 info->shifter.kind =
1340 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1342 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1344 /* This makes the constraint checking happy. */
1345 info->shifter.operator_present = 1;
1347 /* Assume inst->operands[0].qualifier has been resolved.  The <R><m>
1348 register is X only when the destination is X and the extend is
(S|U)XTX; otherwise it is W. */
1348 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1349 info->qualifier = AARCH64_OPND_QLF_W;
1350 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1351 && (info->shifter.kind == AARCH64_MOD_UXTX
1352 || info->shifter.kind == AARCH64_MOD_SXTX))
1353 info->qualifier = AARCH64_OPND_QLF_X;
1358 /* Decode the shifted register operand for e.g.
1359 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1361 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1362 aarch64_opnd_info *info,
1364 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1365 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1370 info->reg.regno = extract_field (FLD_Rm, code, 0);
/* The 'shift' field selects LSL/LSR/ASR/ROR (extend_p is FALSE). */
1372 value = extract_field (FLD_shift, code, 0);
1373 info->shifter.kind =
1374 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1375 if (info->shifter.kind == AARCH64_MOD_ROR
1376 && inst->opcode->iclass != log_shift)
1377 /* ROR is not available for the shifted register operand in arithmetic
instructions; only the logical-shift class accepts it. */
1381 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1383 /* This makes the constraint checking happy. */
1384 info->shifter.operator_present = 1;
1389 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1390 where <offset> is given by the OFFSET parameter and where <factor> is
1391 1 plus SELF's operand-dependent value. fields[0] specifies the field
1392 that holds <base>. */
1394 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1395 aarch64_opnd_info *info, aarch64_insn code,
/* Shared helper: fill in a [<base>, #<offset>*<factor>, MUL VL] address,
   scaling OFFSET by 1 plus SELF's operand-dependent value. */
1398 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1399 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1400 info->addr.offset.is_reg = FALSE;
1401 info->addr.writeback = FALSE;
1402 info->addr.preind = TRUE;
/* Only print "MUL VL" when there is a nonzero immediate to qualify. */
1404 info->shifter.kind = AARCH64_MOD_MUL_VL;
1405 info->shifter.amount = 1;
1406 info->shifter.operator_present = (info->addr.offset.imm != 0);
1407 info->shifter.amount_present = FALSE;
1411 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1412 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1413 SELF's operand-dependent value. fields[0] specifies the field that
1414 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1416 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1417 aarch64_opnd_info *info, aarch64_insn code,
1418 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1419 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1423 offset = extract_field (FLD_SVE_imm4, code, 0);
/* Sign-extend the 4-bit field to the range [-8, 7]. */
1424 offset = ((offset + 8) & 15) - 8;
1425 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1428 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1429 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1430 SELF's operand-dependent value. fields[0] specifies the field that
1431 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1433 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1434 aarch64_opnd_info *info, aarch64_insn code,
1435 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1436 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1440 offset = extract_field (FLD_SVE_imm6, code, 0);
/* Sign-extend the 6-bit field to the range [-32, 31]. */
1441 offset = (((offset + 32) & 63) - 32);
1442 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1445 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1446 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1447 SELF's operand-dependent value. fields[0] specifies the field that
1448 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1449 and imm3 fields, with imm3 being the less-significant part. */
1451 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1452 aarch64_opnd_info *info,
1454 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1455 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* <simm9> is SVE_imm6:imm3 (imm3 least significant); sign-extend the
   9-bit value to the range [-256, 255]. */
1459 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1460 offset = (((offset + 256) & 511) - 256);
1461 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1464 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1465 is given by the OFFSET parameter and where <shift> is SELF's operand-
1466 dependent value. fields[0] specifies the base register field <base>. */
1468 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1469 aarch64_opnd_info *info, aarch64_insn code,
/* Shared helper: fill in a [<base>, #<offset> << <shift>] address,
   scaling OFFSET by SELF's operand-dependent shift amount. */
1472 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1473 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1474 info->addr.offset.is_reg = FALSE;
1475 info->addr.writeback = FALSE;
1476 info->addr.preind = TRUE;
1477 info->shifter.operator_present = FALSE;
1478 info->shifter.amount_present = FALSE;
1482 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1483 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1484 value. fields[0] specifies the base register field. */
1486 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1487 aarch64_opnd_info *info, aarch64_insn code,
1488 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1489 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Sign-extend the 4-bit field (bit 3 is the sign bit). */
1491 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1492 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1495 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1496 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1497 value. fields[0] specifies the base register field. */
1499 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1500 aarch64_opnd_info *info, aarch64_insn code,
1501 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1502 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* The 6-bit offset is unsigned; no sign extension needed. */
1504 int offset = extract_field (FLD_SVE_imm6, code, 0);
1505 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1508 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1509 is SELF's operand-dependent value. fields[0] specifies the base
1510 register field and fields[1] specifies the offset register field. */
1512 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1513 aarch64_opnd_info *info, aarch64_insn code,
1514 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1515 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Some forms forbid XZR (register 31) as the index register. */
1519 index_regno = extract_field (self->fields[1], code, 0);
1520 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1523 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1524 info->addr.offset.regno = index_regno;
1525 info->addr.offset.is_reg = TRUE;
1526 info->addr.writeback = FALSE;
1527 info->addr.preind = TRUE;
/* LSL amount comes from SELF; only print it when nonzero. */
1528 info->shifter.kind = AARCH64_MOD_LSL;
1529 info->shifter.amount = get_operand_specific_data (self);
1530 info->shifter.operator_present = (info->shifter.amount != 0);
1531 info->shifter.amount_present = (info->shifter.amount != 0);
1535 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1536 <shift> is SELF's operand-dependent value. fields[0] specifies the
1537 base register field, fields[1] specifies the offset register field and
1538 fields[2] is a single-bit field that selects SXTW over UXTW. */
1540 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1541 aarch64_opnd_info *info, aarch64_insn code,
1542 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1543 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1545 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1546 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1547 info->addr.offset.is_reg = TRUE;
1548 info->addr.writeback = FALSE;
1549 info->addr.preind = TRUE;
/* fields[2] set selects SXTW, clear selects UXTW. */
1550 if (extract_field (self->fields[2], code, 0))
1551 info->shifter.kind = AARCH64_MOD_SXTW;
1553 info->shifter.kind = AARCH64_MOD_UXTW;
/* The extend operator is always printed; its amount only when nonzero. */
1554 info->shifter.amount = get_operand_specific_data (self);
1555 info->shifter.operator_present = TRUE;
1556 info->shifter.amount_present = (info->shifter.amount != 0);
1560 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1561 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1562 fields[0] specifies the base register field. */
1564 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1565 aarch64_opnd_info *info, aarch64_insn code,
1566 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1567 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* 5-bit unsigned immediate; the vector-base case reuses the scalar
   base+imm helper. */
1569 int offset = extract_field (FLD_imm5, code, 0);
1570 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1573 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1574 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1575 number. fields[0] specifies the base register field and fields[1]
1576 specifies the offset register field. */
1578 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1579 aarch64_insn code, enum aarch64_modifier_kind kind)
/* Shared helper for the vector+vector address forms; KIND is the
   modifier (LSL/SXTW/UXTW) chosen by the caller. */
1581 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1582 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1583 info->addr.offset.is_reg = TRUE;
1584 info->addr.writeback = FALSE;
1585 info->addr.preind = TRUE;
1586 info->shifter.kind = kind;
1587 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
/* LSL #0 is the default form, so only then is the operator omitted. */
1588 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1589 || info->shifter.amount != 0);
1590 info->shifter.amount_present = (info->shifter.amount != 0);
1594 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1595 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1596 field and fields[1] specifies the offset register field. */
1598 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1599 aarch64_opnd_info *info, aarch64_insn code,
1600 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1601 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Thin wrapper: vector+vector address with LSL modifier. */
1603 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1606 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1607 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1608 field and fields[1] specifies the offset register field. */
1610 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1611 aarch64_opnd_info *info, aarch64_insn code,
1612 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1613 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Thin wrapper: vector+vector address with SXTW modifier. */
1615 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1618 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1619 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1620 field and fields[1] specifies the offset register field. */
1622 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1623 aarch64_opnd_info *info, aarch64_insn code,
1624 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1625 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Thin wrapper: vector+vector address with UXTW modifier. */
1627 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1630 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1631 has the raw field value and that the low 8 bits decode to VALUE. */
1633 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
/* Bit 8 of the raw immediate selects the "shifted by 8" form; VALUE is
   the already sign- or zero-extended low 8 bits. */
1635 info->shifter.kind = AARCH64_MOD_LSL;
1636 info->shifter.amount = 0;
1637 if (info->imm.value & 0x100)
1640 /* Decode 0x100 as #0, LSL #8. */
1641 info->shifter.amount = 8;
/* Print the shifter only when the shift amount is nonzero. */
1645 info->shifter.operator_present = (info->shifter.amount != 0);
1646 info->shifter.amount_present = (info->shifter.amount != 0);
1647 info->imm.value = value;
1651 /* Decode an SVE ADD/SUB immediate. */
1653 aarch64_ext_sve_aimm (const aarch64_operand *self,
1654 aarch64_opnd_info *info, const aarch64_insn code,
1655 const aarch64_inst *inst,
1656 aarch64_operand_error *errors)
/* ADD/SUB treat the low 8 bits as unsigned, hence the uint8_t cast. */
1658 return (aarch64_ext_imm (self, info, code, inst, errors)
1659 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1662 /* Decode an SVE CPY/DUP immediate. */
1664 aarch64_ext_sve_asimm (const aarch64_operand *self,
1665 aarch64_opnd_info *info, const aarch64_insn code,
1666 const aarch64_inst *inst,
1667 aarch64_operand_error *errors)
/* CPY/DUP treat the low 8 bits as signed, hence the int8_t cast
   (contrast with aarch64_ext_sve_aimm above). */
1669 return (aarch64_ext_imm (self, info, code, inst, errors)
1670 && decode_sve_aimm (info, (int8_t) info->imm.value));
1673 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1674 The fields array specifies which field to use. */
1676 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1677 aarch64_opnd_info *info, aarch64_insn code,
1678 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1679 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Immediates are single-precision bit patterns: 0x3f800000 == 1.0f,
   0x3f000000 == 0.5f. */
1681 if (extract_field (self->fields[0], code, 0))
1682 info->imm.value = 0x3f800000;
1684 info->imm.value = 0x3f000000;
1685 info->imm.is_fp = TRUE;
1689 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1690 The fields array specifies which field to use. */
1692 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1693 aarch64_opnd_info *info, aarch64_insn code,
1694 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1695 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Immediates are single-precision bit patterns: 0x40000000 == 2.0f,
   0x3f000000 == 0.5f. */
1697 if (extract_field (self->fields[0], code, 0))
1698 info->imm.value = 0x40000000;
1700 info->imm.value = 0x3f000000;
1701 info->imm.is_fp = TRUE;
1705 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1706 The fields array specifies which field to use. */
1708 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1709 aarch64_opnd_info *info, aarch64_insn code,
1710 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1711 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* Immediates are single-precision bit patterns: 0x3f800000 == 1.0f,
   0x0 == 0.0f. */
1713 if (extract_field (self->fields[0], code, 0))
1714 info->imm.value = 0x3f800000;
1716 info->imm.value = 0x0;
1717 info->imm.is_fp = TRUE;
1721 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1722 array specifies which field to use for Zn. MM is encoded in the
1723 concatenation of imm5 and SVE_tszh, with imm5 being the less
1724 significant part. */
1726 aarch64_ext_sve_index (const aarch64_operand *self,
1727 aarch64_opnd_info *info, aarch64_insn code,
1728 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1729 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1733 info->reglane.regno = extract_field (self->fields[0], code, 0);
1734 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
/* The low 5 bits being all-zero is a reserved (invalid) encoding. */
1736 if ((val & 31) == 0)
/* In the triangular encoding, the position of the lowest set bit gives
   the element size; the bits above it hold the index. */
1737 while ((val & 1) == 0)
1739 info->reglane.index = val / 2;
1743 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1745 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1746 aarch64_opnd_info *info, const aarch64_insn code,
1747 const aarch64_inst *inst,
1748 aarch64_operand_error *errors)
/* The MOV alias is only valid when the logical immediate is a
   permitted DUPM pattern for the destination's element size. */
1750 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1751 return (aarch64_ext_limm (self, info, code, inst, errors)
1752 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1755 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1756 and where MM occupies the most-significant part. The operand-dependent
1757 value specifies the number of bits in Zn. */
1759 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1760 aarch64_opnd_info *info, aarch64_insn code,
1761 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1762 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* The combined field is split: low REG_BITS bits are the register
   number, the remaining high bits are the lane index. */
1764 unsigned int reg_bits = get_operand_specific_data (self);
1765 unsigned int val = extract_all_fields (self, code);
1766 info->reglane.regno = val & ((1 << reg_bits) - 1);
1767 info->reglane.index = val >> reg_bits;
1771 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1772 to use for Zn. The opcode-dependent value specifies the number
1773 of registers in the list. */
1775 aarch64_ext_sve_reglist (const aarch64_operand *self,
1776 aarch64_opnd_info *info, aarch64_insn code,
1777 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1778 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
/* The list is consecutive registers starting at fields[0]; its length
   comes from the opcode-dependent value. */
1780 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1781 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1785 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1786 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1789 aarch64_ext_sve_scale (const aarch64_operand *self,
1790 aarch64_opnd_info *info, aarch64_insn code,
1791 const aarch64_inst *inst, aarch64_operand_error *errors)
1795 if (!aarch64_ext_imm (self, info, code, inst, errors))
/* <amount> - 1 is what is encoded, so add 1 back; MUL #1 (val == 0)
   is the default and is not printed. */
1797 val = extract_field (FLD_SVE_imm4, code, 0);
1798 info->shifter.kind = AARCH64_MOD_MUL;
1799 info->shifter.amount = val + 1;
1800 info->shifter.operator_present = (val != 0);
1801 info->shifter.amount_present = (val != 0);
1805 /* Return the top set bit in VALUE, which is expected to be relatively
1808 get_top_bit (uint64_t value)
/* Repeatedly clear the lowest set bit (value & -value isolates it)
   until only the most-significant set bit remains. */
1810 while ((value & -value) != value)
1811 value -= value & -value;
1815 /* Decode an SVE shift-left immediate. */
1817 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1818 aarch64_opnd_info *info, const aarch64_insn code,
1819 const aarch64_inst *inst, aarch64_operand_error *errors)
/* A raw value of zero is a reserved encoding. */
1821 if (!aarch64_ext_imm (self, info, code, inst, errors)
1822 || info->imm.value == 0)
/* The top set bit marks the element size; the bits below it are the
   shift amount. */
1825 info->imm.value -= get_top_bit (info->imm.value);
1829 /* Decode an SVE shift-right immediate. */
1831 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1832 aarch64_opnd_info *info, const aarch64_insn code,
1833 const aarch64_inst *inst, aarch64_operand_error *errors)
/* A raw value of zero is a reserved encoding. */
1835 if (!aarch64_ext_imm (self, info, code, inst, errors)
1836 || info->imm.value == 0)
/* Shift-right amounts count down from 2 * element size. */
1839 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1843 /* Bitfields that are commonly used to encode certain operands' information
1844 may be partially used as part of the base opcode in some instructions.
1845 For example, the bit 1 of the field 'size' in
1846 FCVTXN <Vb><d>, <Va><n>
1847 is actually part of the base opcode, while only size<0> is available
1848 for encoding the register type. Another example is the AdvSIMD
1849 instruction ORR (register), in which the field 'size' is also used for
1850 the base opcode, leaving only the field 'Q' available to encode the
1851 vector register arrangement specifier '8B' or '16B'.
1853 This function tries to deduce the qualifier from the value of partially
1854 constrained field(s). Given the VALUE of such a field or fields, the
1855 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1856 operand encoding), the function returns the matching qualifier or
1857 AARCH64_OPND_QLF_NIL if nothing matches.
1859 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1860 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1861 may end with AARCH64_OPND_QLF_NIL. */
1863 static enum aarch64_opnd_qualifier
1864 get_qualifier_from_partial_encoding (aarch64_insn value,
1865 const enum aarch64_opnd_qualifier* \
1870 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1871 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1873 aarch64_insn standard_value;
/* CANDIDATES may be terminated early by AARCH64_OPND_QLF_NIL. */
1874 if (candidates[i] == AARCH64_OPND_QLF_NIL)
/* Only compare the bits that MASK says are operand-encoding bits. */
1876 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1877 if ((standard_value & mask) == (value & mask))
1878 return candidates[i];
/* No candidate matched the partially-constrained encoding. */
1880 return AARCH64_OPND_QLF_NIL;
1883 /* Given a list of qualifier sequences, return all possible valid qualifiers
1884 for operand IDX in QUALIFIERS.
1885 Assume QUALIFIERS is an array whose length is large enough. */
1888 get_operand_possible_qualifiers (int idx,
1889 const aarch64_opnd_qualifier_seq_t *list,
1890 enum aarch64_opnd_qualifier *qualifiers)
/* Copy column IDX of each qualifier sequence until (and including) the
   NIL terminator. */
1893 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1894 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1898 /* Decode the size Q field for e.g. SHADD.
1899 We tag one operand with the qualifier according to the code;
1900 whether the qualifier is valid for this opcode or not is the
1901 duty of the semantic checking. */
1904 decode_sizeq (aarch64_inst *inst)
1907 enum aarch64_opnd_qualifier qualifier;
1909 aarch64_insn value, mask;
1910 enum aarch64_field_kind fld_sz;
1911 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
/* The AdvSIMD load/store classes keep their element size in
   vldst_size rather than the common 'size' field. */
1913 if (inst->opcode->iclass == asisdlse
1914 || inst->opcode->iclass == asisdlsep
1915 || inst->opcode->iclass == asisdlso
1916 || inst->opcode->iclass == asisdlsop)
1917 fld_sz = FLD_vldst_size;
1922 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1923 /* Obtain the info that which bits of fields Q and size are actually
1924 available for operand encoding.  Opcodes like FMAXNM and FMLA have
1925 size[1] unavailable. */
1926 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1928 /* The index of the operand we are going to tag a qualifier and the qualifier
1929 itself are reasoned from the value of the size and Q fields and the
1930 possible valid qualifier lists. */
1931 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1932 DEBUG_TRACE ("key idx: %d", idx);
1934 /* For most related instructions, size:Q are fully available for operand
encoding; the value maps directly to a vector-register qualifier. */
1938 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
/* Otherwise match VALUE against the candidate qualifiers under MASK. */
1942 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1944 #ifdef DEBUG_AARCH64
1948 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1949 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1950 DEBUG_TRACE ("qualifier %d: %s", i,
1951 aarch64_get_qualifier_name(candidates[i]));
1952 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1954 #endif /* DEBUG_AARCH64 */
1956 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1958 if (qualifier == AARCH64_OPND_QLF_NIL)
1961 inst->operands[idx].qualifier = qualifier;
1965 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1966 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1969 decode_asimd_fcvt (aarch64_inst *inst)
1971 aarch64_field field = {0, 0};
1973 enum aarch64_opnd_qualifier qualifier;
/* size[0] selects 4S (narrow from/to single) vs. 2D (double). */
1975 gen_sub_field (FLD_size, 0, 1, &field);
1976 value = extract_field_2 (&field, inst->value, 0);
1977 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1978 : AARCH64_OPND_QLF_V_2D;
/* FCVTN tags the source (wider) operand, FCVTL the destination. */
1979 switch (inst->opcode->op)
1983 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1984 inst->operands[1].qualifier = qualifier;
1988 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1989 inst->operands[0].qualifier = qualifier;
1999 /* Decode size[0], i.e. bit 22, for
2000 e.g. FCVTXN <Vb><d>, <Va><n>. */
2003 decode_asisd_fcvtxn (aarch64_inst *inst)
2005 aarch64_field field = {0, 0};
/* Only size[0] is available; a clear bit means the S_S qualifier. */
2006 gen_sub_field (FLD_size, 0, 1, &field);
2007 if (!extract_field_2 (&field, inst->value, 0))
2009 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2013 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2015 decode_fcvt (aarch64_inst *inst)
2017 enum aarch64_opnd_qualifier qualifier;
/* 'opc' is bits [16:15]; note value 2 is absent from the switch and is
   presumably a reserved encoding — confirmed by the full switch (the
   default case is outside this view). */
2019 const aarch64_field field = {15, 2};
2022 value = extract_field_2 (&field, inst->value, 0);
2025 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2026 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2027 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2030 inst->operands[0].qualifier = qualifier;
2035 /* Do miscellaneous decodings that are not common enough to be driven by
2039 do_misc_decoding (aarch64_inst *inst)
2042 switch (inst->opcode->op)
2045 return decode_fcvt (inst);
2051 return decode_asimd_fcvt (inst);
2054 return decode_asisd_fcvtxn (inst);
/* SVE MOV aliases are only valid when the duplicated source fields
   agree; each case below checks that equality. */
2058 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2059 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2060 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2063 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2064 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2067 /* Index must be zero: tszh:imm5 must be a power of two <= 16,
i.e. exactly one set bit in the element-size marker position. */
2068 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2069 return value > 0 && value <= 16 && value == (value & -value);
2072 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2073 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2076 /* Index must be nonzero: the value must have bits above the lowest
set bit (i.e. not be an exact power of two). */
2077 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2078 return value > 0 && value != (value & -value);
2081 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2082 == extract_field (FLD_SVE_Pm, inst->value, 0));
2084 case OP_MOVZS_P_P_P:
2086 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2087 == extract_field (FLD_SVE_Pm, inst->value, 0));
2089 case OP_NOTS_P_P_P_Z:
2090 case OP_NOT_P_P_P_Z:
2091 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2092 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2099 /* Opcodes that have fields shared by multiple operands are usually flagged
2100 with flags.  In this function, we detect such flags, decode the related
2101 field(s) and store the information in one of the related operands.  The
2102 'one' operand is not any operand but one of the operands that can
2103 accommodate all the information that has been decoded. */
2106 do_special_decoding (aarch64_inst *inst)
2110 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2111 if (inst->opcode->flags & F_COND)
2113 value = extract_field (FLD_cond2, inst->value, 0);
2114 inst->cond = get_cond_from_value (value);
/* 'sf' selects the 32- vs 64-bit general register qualifier; when F_N
   is also set, the N bit must agree with sf. */
2117 if (inst->opcode->flags & F_SF)
2119 idx = select_operand_for_sf_field_coding (inst->opcode);
2120 value = extract_field (FLD_sf, inst->value, 0);
2121 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2122 if ((inst->opcode->flags & F_N)
2123 && extract_field (FLD_N, inst->value, 0) != value)
/* LSE atomics use the lse_sz field the same way sf is used above. */
2127 if (inst->opcode->flags & F_LSE_SZ)
2129 idx = select_operand_for_sf_field_coding (inst->opcode);
2130 value = extract_field (FLD_lse_sz, inst->value, 0);
2131 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2133 /* size:Q fields. */
2134 if (inst->opcode->flags & F_SIZEQ)
2135 return decode_sizeq (inst);
/* 'type' selects the FP precision (S/D/H) of the tagged operand. */
2137 if (inst->opcode->flags & F_FPTYPE)
2139 idx = select_operand_for_fptype_field_coding (inst->opcode);
2140 value = extract_field (FLD_type, inst->value, 0);
2143 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2144 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2145 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2150 if (inst->opcode->flags & F_SSIZE)
2152 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2153 of the base opcode. */
2155 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2156 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2157 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2158 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2159 /* For most related instructions, the 'size' field is fully available for
2160 operand encoding. */
2162 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
/* Otherwise fall back to partial-encoding matching under MASK. */
2165 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2167 inst->operands[idx].qualifier
2168 = get_qualifier_from_partial_encoding (value, candidates, mask);
2172 if (inst->opcode->flags & F_T)
2174 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2177 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2178 == AARCH64_OPND_CLASS_SIMD_REG);
/* Count trailing zeros of imm5 (max 3) to find the element size, then
   combine with Q to form the vector arrangement. */
2189 val = extract_field (FLD_imm5, inst->value, 0);
2190 while ((val & 0x1) == 0 && ++num <= 3)
2194 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2195 inst->operands[0].qualifier =
2196 get_vreg_qualifier_from_value ((num << 1) | Q);
2199 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2201 /* Use Rt to encode in the case of e.g.
2202 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2203 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2206 /* Otherwise use the result operand, which has to be an integer
register. */
2208 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2209 == AARCH64_OPND_CLASS_INT_REG);
2212 assert (idx == 0 || idx == 1);
2213 value = extract_field (FLD_Q, inst->value, 0);
2214 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
/* opc[0] selects the W vs. X qualifier for load/store register sizes. */
2217 if (inst->opcode->flags & F_LDS_SIZE)
2219 aarch64_field field = {0, 0};
2220 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2221 == AARCH64_OPND_CLASS_INT_REG);
2222 gen_sub_field (FLD_opc, 0, 1, &field);
2223 value = extract_field_2 (&field, inst->value, 0);
2224 inst->operands[0].qualifier
2225 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2228 /* Miscellaneous decoding; done as the last step. */
2229 if (inst->opcode->flags & F_MISC)
2230 return do_misc_decoding (inst);
2235 /* Converters converting a real opcode instruction to its alias form. */
2237 /* ROR <Wd>, <Ws>, #<shift>
2239 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2241 convert_extr_to_ror (aarch64_inst *inst)
/* EXTR is ROR only when both source registers are the same; fold the
   shift operand down and drop the duplicate register. */
2243 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2245 copy_operand_info (inst, 2, 3);
2246 inst->operands[3].type = AARCH64_OPND_NIL;
2252 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2254 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2256 convert_shll_to_xtl (aarch64_inst *inst)
/* USHLL/SSHLL with a zero shift is the UXTL/SXTL alias; drop the #0. */
2258 if (inst->operands[2].imm.value == 0)
2260 inst->operands[2].type = AARCH64_OPND_NIL;
2267 UBFM <Xd>, <Xn>, #<shift>, #63.
2269 LSR <Xd>, <Xn>, #<shift>. */
2271 convert_bfm_to_sr (aarch64_inst *inst)
/* UBFM/SBFM is LSR/ASR when imms is all-ones for the register width
   (31 for W, 63 for X); then the width operand is dropped. */
2275 imms = inst->operands[3].imm.value;
2276 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2279 inst->operands[3].type = AARCH64_OPND_NIL;
2286 /* Convert MOV to ORR. */
2288 convert_orr_to_mov (aarch64_inst *inst)
2290 /* MOV <Vd>.<T>, <Vn>.<T>
2292 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
/* The alias only applies when both source registers are identical. */
2293 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2295 inst->operands[2].type = AARCH64_OPND_NIL;
2301 /* When <imms> >= <immr>, the instruction written:
2302 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2304 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2307 convert_bfm_to_bfx (aarch64_inst *inst)
2311 immr = inst->operands[2].imm.value;
2312 imms = inst->operands[3].imm.value;
2316 inst->operands[2].imm.value = lsb;
2317 inst->operands[3].imm.value = imms + 1 - lsb;
2318 /* The two opcodes have different qualifiers for
2319 the immediate operands; reset to help the checking. */
2320 reset_operand_qualifier (inst, 2);
2321 reset_operand_qualifier (inst, 3);
2328 /* When <imms> < <immr>, the instruction written:
2329 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2331 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2334 convert_bfm_to_bfi (aarch64_inst *inst)
2336 int64_t immr, imms, val;
2338 immr = inst->operands[2].imm.value;
2339 imms = inst->operands[3].imm.value;
2340 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2343 inst->operands[2].imm.value = (val - immr) & (val - 1);
2344 inst->operands[3].imm.value = imms + 1;
2345 /* The two opcodes have different qualifiers for
2346 the immediate operands; reset to help the checking. */
2347 reset_operand_qualifier (inst, 2);
2348 reset_operand_qualifier (inst, 3);
2355 /* The instruction written:
2356 BFC <Xd>, #<lsb>, #<width>
2358 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2361 convert_bfm_to_bfc (aarch64_inst *inst)
2363 int64_t immr, imms, val;
2365 /* Should have been assured by the base opcode value. */
2366 assert (inst->operands[1].reg.regno == 0x1f);
2368 immr = inst->operands[2].imm.value;
2369 imms = inst->operands[3].imm.value;
2370 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2373 /* Drop XZR from the second operand. */
2374 copy_operand_info (inst, 1, 2);
2375 copy_operand_info (inst, 2, 3);
2376 inst->operands[3].type = AARCH64_OPND_NIL;
2378 /* Recalculate the immediates. */
2379 inst->operands[1].imm.value = (val - immr) & (val - 1);
2380 inst->operands[2].imm.value = imms + 1;
2382 /* The two opcodes have different qualifiers for the operands; reset to
2383 help the checking. */
2384 reset_operand_qualifier (inst, 1);
2385 reset_operand_qualifier (inst, 2);
2386 reset_operand_qualifier (inst, 3);
2394 /* The instruction written:
2395 LSL <Xd>, <Xn>, #<shift>
2397 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2400 convert_ubfm_to_lsl (aarch64_inst *inst)
2402 int64_t immr = inst->operands[2].imm.value;
2403 int64_t imms = inst->operands[3].imm.value;
2405 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2407 if ((immr == 0 && imms == val) || immr == imms + 1)
2409 inst->operands[3].type = AARCH64_OPND_NIL;
2410 inst->operands[2].imm.value = val - imms;
2417 /* CINC <Wd>, <Wn>, <cond>
2419 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2420 where <cond> is not AL or NV. */
2423 convert_from_csel (aarch64_inst *inst)
2425 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2426 && (inst->operands[3].cond->value & 0xe) != 0xe)
2428 copy_operand_info (inst, 2, 3);
2429 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2430 inst->operands[3].type = AARCH64_OPND_NIL;
2436 /* CSET <Wd>, <cond>
2438 CSINC <Wd>, WZR, WZR, invert(<cond>)
2439 where <cond> is not AL or NV. */
2442 convert_csinc_to_cset (aarch64_inst *inst)
2444 if (inst->operands[1].reg.regno == 0x1f
2445 && inst->operands[2].reg.regno == 0x1f
2446 && (inst->operands[3].cond->value & 0xe) != 0xe)
2448 copy_operand_info (inst, 1, 3);
2449 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2450 inst->operands[3].type = AARCH64_OPND_NIL;
2451 inst->operands[2].type = AARCH64_OPND_NIL;
2459 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2461 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2462 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2463 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2464 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2465 machine-instruction mnemonic must be used. */
2468 convert_movewide_to_mov (aarch64_inst *inst)
2470 uint64_t value = inst->operands[1].imm.value;
2471 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2472 if (value == 0 && inst->operands[1].shifter.amount != 0)
2474 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2475 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2476 value <<= inst->operands[1].shifter.amount;
2477 /* As an alias convertor, it has to be clear that the INST->OPCODE
2478 is the opcode of the real instruction. */
2479 if (inst->opcode->op == OP_MOVN)
2481 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2483 /* A MOVN has an immediate that could be encoded by MOVZ. */
2484 if (aarch64_wide_constant_p (value, is32, NULL))
2487 inst->operands[1].imm.value = value;
2488 inst->operands[1].shifter.amount = 0;
2494 ORR <Wd>, WZR, #<imm>.
2496 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2497 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2498 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2499 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2500 machine-instruction mnemonic must be used. */
2503 convert_movebitmask_to_mov (aarch64_inst *inst)
2508 /* Should have been assured by the base opcode value. */
2509 assert (inst->operands[1].reg.regno == 0x1f);
2510 copy_operand_info (inst, 1, 2);
2511 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2512 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2513 value = inst->operands[1].imm.value;
2514 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2516 if (inst->operands[0].reg.regno != 0x1f
2517 && (aarch64_wide_constant_p (value, is32, NULL)
2518 || aarch64_wide_constant_p (~value, is32, NULL)))
2521 inst->operands[2].type = AARCH64_OPND_NIL;
2525 /* Some alias opcodes are disassembled by being converted from their real-form.
2526 N.B. INST->OPCODE is the real opcode rather than the alias. */
2529 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2535 return convert_bfm_to_sr (inst);
2537 return convert_ubfm_to_lsl (inst);
2541 return convert_from_csel (inst);
2544 return convert_csinc_to_cset (inst);
2548 return convert_bfm_to_bfx (inst);
2552 return convert_bfm_to_bfi (inst);
2554 return convert_bfm_to_bfc (inst);
2556 return convert_orr_to_mov (inst);
2557 case OP_MOV_IMM_WIDE:
2558 case OP_MOV_IMM_WIDEN:
2559 return convert_movewide_to_mov (inst);
2560 case OP_MOV_IMM_LOG:
2561 return convert_movebitmask_to_mov (inst);
2563 return convert_extr_to_ror (inst);
2568 return convert_shll_to_xtl (inst);
2575 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2576 aarch64_inst *, int, aarch64_operand_error *errors);
2578 /* Given the instruction information in *INST, check if the instruction has
2579 any alias form that can be used to represent *INST. If the answer is yes,
2580 update *INST to be in the form of the determined alias. */
2582 /* In the opcode description table, the following flags are used in opcode
2583 entries to help establish the relations between the real and alias opcodes:
2585 F_ALIAS: opcode is an alias
2586 F_HAS_ALIAS: opcode has alias(es)
2589 F_P3: Disassembly preference priority 1-3 (the larger the
2590 higher). If nothing is specified, it is the priority
2591 0 by default, i.e. the lowest priority.
2593 Although the relation between the machine and the alias instructions are not
2594 explicitly described, it can be easily determined from the base opcode
2595 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2596 description entries:
2598 The mask of an alias opcode must be equal to or a super-set (i.e. more
2599 constrained) of that of the aliased opcode; so is the base opcode value.
2601 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2602 && (opcode->mask & real->mask) == real->mask
2603 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2604 then OPCODE is an alias of, and only of, the REAL instruction
2606 The alias relationship is forced flat-structured to keep related algorithm
2607 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
   During the disassembling, the decoding decision tree (in
   opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
   if the decoding of such a machine instruction succeeds (and -Mno-aliases is
   not specified), the disassembler will check whether any alias instruction
   exists for this real instruction.  If there is, the disassembler will try
   to disassemble the 32-bit binary again using the alias's rule, or try to
   convert the IR to the form of the alias.  In the case of multiple aliases,
   the aliases are tried one by one from the highest priority (currently the
   flag F_P3) to the lowest priority (no priority flag), and the first one
   that succeeds is adopted.
2620 You may ask why there is a need for the conversion of IR from one form to
2621 another in handling certain aliases. This is because on one hand it avoids
2622 adding more operand code to handle unusual encoding/decoding; on other
2623 hand, during the disassembling, the conversion is an effective approach to
2624 check the condition of an alias (as an alias may be adopted only if certain
2625 conditions are met).
2627 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2628 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2629 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2632 determine_disassembling_preference (struct aarch64_inst *inst,
2633 aarch64_operand_error *errors)
2635 const aarch64_opcode *opcode;
2636 const aarch64_opcode *alias;
2638 opcode = inst->opcode;
2640 /* This opcode does not have an alias, so use itself. */
2641 if (!opcode_has_alias (opcode))
2644 alias = aarch64_find_alias_opcode (opcode);
2647 #ifdef DEBUG_AARCH64
2650 const aarch64_opcode *tmp = alias;
2651 printf ("#### LIST orderd: ");
2654 printf ("%s, ", tmp->name);
2655 tmp = aarch64_find_next_alias_opcode (tmp);
2659 #endif /* DEBUG_AARCH64 */
2661 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2663 DEBUG_TRACE ("try %s", alias->name);
2664 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2666 /* An alias can be a pseudo opcode which will never be used in the
2667 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2669 if (pseudo_opcode_p (alias))
2671 DEBUG_TRACE ("skip pseudo %s", alias->name);
2675 if ((inst->value & alias->mask) != alias->opcode)
2677 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2680 /* No need to do any complicated transformation on operands, if the alias
2681 opcode does not have any operand. */
2682 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2684 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2685 aarch64_replace_opcode (inst, alias);
2688 if (alias->flags & F_CONV)
2691 memcpy (©, inst, sizeof (aarch64_inst));
2692 /* ALIAS is the preference as long as the instruction can be
2693 successfully converted to the form of ALIAS. */
2694 if (convert_to_alias (©, alias) == 1)
2696 aarch64_replace_opcode (©, alias);
2697 assert (aarch64_match_operands_constraint (©, NULL));
2698 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2699 memcpy (inst, ©, sizeof (aarch64_inst));
2705 /* Directly decode the alias opcode. */
2707 memset (&temp, '\0', sizeof (aarch64_inst));
2708 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2710 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2711 memcpy (inst, &temp, sizeof (aarch64_inst));
2718 /* Some instructions (including all SVE ones) use the instruction class
2719 to describe how a qualifiers_list index is represented in the instruction
2720 encoding. If INST is such an instruction, decode the appropriate fields
2721 and fill in the operand qualifiers accordingly. Return true if no
2722 problems are found. */
2725 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2730 switch (inst->opcode->iclass)
2733 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2737 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2740 while ((i & 1) == 0)
2748 /* Pick the smallest applicable element size. */
2749 if ((inst->value & 0x20600) == 0x600)
2751 else if ((inst->value & 0x20400) == 0x400)
2753 else if ((inst->value & 0x20000) == 0)
2760 /* sve_misc instructions have only a single variant. */
2764 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2768 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2771 case sve_shift_pred:
2772 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2783 case sve_shift_unpred:
2784 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2788 variant = extract_field (FLD_size, inst->value, 0);
2794 variant = extract_field (FLD_size, inst->value, 0);
2798 i = extract_field (FLD_size, inst->value, 0);
2805 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2809 /* No mapping between instruction class and qualifiers. */
2813 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2814 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2817 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2818 fails, which meanes that CODE is not an instruction of OPCODE; otherwise
2821 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2822 determined and used to disassemble CODE; this is done just before the
2826 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2827 aarch64_inst *inst, int noaliases_p,
2828 aarch64_operand_error *errors)
2832 DEBUG_TRACE ("enter with %s", opcode->name);
2834 assert (opcode && inst);
2837 memset (inst, '\0', sizeof (aarch64_inst));
2839 /* Check the base opcode. */
2840 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2842 DEBUG_TRACE ("base opcode match FAIL");
2846 inst->opcode = opcode;
2849 /* Assign operand codes and indexes. */
2850 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2852 if (opcode->operands[i] == AARCH64_OPND_NIL)
2854 inst->operands[i].type = opcode->operands[i];
2855 inst->operands[i].idx = i;
2858 /* Call the opcode decoder indicated by flags. */
2859 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2861 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2865 /* Possibly use the instruction class to determine the correct
2867 if (!aarch64_decode_variant_using_iclass (inst))
2869 DEBUG_TRACE ("iclass-based decoder FAIL");
2873 /* Call operand decoders. */
2874 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2876 const aarch64_operand *opnd;
2877 enum aarch64_opnd type;
2879 type = opcode->operands[i];
2880 if (type == AARCH64_OPND_NIL)
2882 opnd = &aarch64_operands[type];
2883 if (operand_has_extractor (opnd)
2884 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2887 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2892 /* If the opcode has a verifier, then check it now. */
2893 if (opcode->verifier && ! opcode->verifier (opcode, code))
2895 DEBUG_TRACE ("operand verifier FAIL");
2899 /* Match the qualifiers. */
2900 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2902 /* Arriving here, the CODE has been determined as a valid instruction
2903 of OPCODE and *INST has been filled with information of this OPCODE
2904 instruction. Before the return, check if the instruction has any
2905 alias and should be disassembled in the form of its alias instead.
2906 If the answer is yes, *INST will be updated. */
2908 determine_disassembling_preference (inst, errors);
2909 DEBUG_TRACE ("SUCCESS");
2914 DEBUG_TRACE ("constraint matching FAIL");
2921 /* This does some user-friendly fix-up to *INST. It is currently focus on
2922 the adjustment of qualifiers to help the printed instruction
2923 recognized/understood more easily. */
2926 user_friendly_fixup (aarch64_inst *inst)
2928 switch (inst->opcode->iclass)
2931 /* TBNZ Xn|Wn, #uimm6, label
2932 Test and Branch Not Zero: conditionally jumps to label if bit number
2933 uimm6 in register Xn is not zero. The bit number implies the width of
2934 the register, which may be written and should be disassembled as Wn if
2935 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2937 if (inst->operands[1].imm.value < 32)
2938 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2944 /* Decode INSN and fill in *INST the instruction information. An alias
2945 opcode may be filled in *INSN if NOALIASES_P is FALSE. Return zero on
2949 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2950 bfd_boolean noaliases_p,
2951 aarch64_operand_error *errors)
2953 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2955 #ifdef DEBUG_AARCH64
2958 const aarch64_opcode *tmp = opcode;
2960 DEBUG_TRACE ("opcode lookup:");
2963 aarch64_verbose (" %s", tmp->name);
2964 tmp = aarch64_find_next_opcode (tmp);
2967 #endif /* DEBUG_AARCH64 */
2969 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2970 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2971 opcode field and value, apart from the difference that one of them has an
2972 extra field as part of the opcode, but such a field is used for operand
2973 encoding in other opcode(s) ('immh' in the case of the example). */
2974 while (opcode != NULL)
2976 /* But only one opcode can be decoded successfully for, as the
2977 decoding routine will check the constraint carefully. */
2978 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
2980 opcode = aarch64_find_next_opcode (opcode);
2986 /* Print operands. */
2989 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2990 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2992 int i, pcrel_p, num_printed;
2994 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2997 /* We regard the opcode operand info more, however we also look into
2998 the inst->operands to support the disassembling of the optional
3000 The two operand code should be the same in all cases, apart from
3001 when the operand can be optional. */
3002 if (opcode->operands[i] == AARCH64_OPND_NIL
3003 || opnds[i].type == AARCH64_OPND_NIL)
3006 /* Generate the operand string in STR. */
3007 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3008 &info->target, ¬es);
3010 /* Print the delimiter (taking account of omitted operand(s)). */
3012 (*info->fprintf_func) (info->stream, "%s",
3013 num_printed++ == 0 ? "\t" : ", ");
3015 /* Print the operand. */
3017 (*info->print_address_func) (info->target, info);
3019 (*info->fprintf_func) (info->stream, "%s", str);
3022 if (notes && !no_notes)
3023 (*info->fprintf_func) (info->stream, "\t; note: %s", notes);
3026 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3029 remove_dot_suffix (char *name, const aarch64_inst *inst)
3034 ptr = strchr (inst->opcode->name, '.');
3035 assert (ptr && inst->cond);
3036 len = ptr - inst->opcode->name;
3038 strncpy (name, inst->opcode->name, len);
3042 /* Print the instruction mnemonic name. */
3045 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3047 if (inst->opcode->flags & F_COND)
3049 /* For instructions that are truly conditionally executed, e.g. b.cond,
3050 prepare the full mnemonic name with the corresponding condition
3054 remove_dot_suffix (name, inst);
3055 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3058 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3061 /* Decide whether we need to print a comment after the operands of
3062 instruction INST. */
3065 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3067 if (inst->opcode->flags & F_COND)
3070 unsigned int i, num_conds;
3072 remove_dot_suffix (name, inst);
3073 num_conds = ARRAY_SIZE (inst->cond->names);
3074 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3075 (*info->fprintf_func) (info->stream, "%s %s.%s",
3076 i == 1 ? " //" : ",",
3077 name, inst->cond->names[i]);
3081 /* Print the instruction according to *INST. */
3084 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3085 struct disassemble_info *info)
3087 print_mnemonic_name (inst, info);
3088 print_operands (pc, inst->opcode, inst->operands, info);
3089 print_comment (inst, info);
3092 /* Entry-point of the instruction disassembler and printer. */
3095 print_insn_aarch64_word (bfd_vma pc,
3097 struct disassemble_info *info,
3098 aarch64_operand_error *errors)
3100 static const char *err_msg[6] =
3103 [-ERR_UND] = "undefined",
3104 [-ERR_UNP] = "unpredictable",
3111 info->insn_info_valid = 1;
3112 info->branch_delay_insns = 0;
3113 info->data_size = 0;
3117 if (info->flags & INSN_HAS_RELOC)
3118 /* If the instruction has a reloc associated with it, then
3119 the offset field in the instruction will actually be the
3120 addend for the reloc. (If we are using REL type relocs).
3121 In such cases, we can ignore the pc when computing
3122 addresses, since the addend is not currently pc-relative. */
3125 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3127 if (((word >> 21) & 0x3ff) == 1)
3129 /* RESERVED for ALES. */
3130 assert (ret != ERR_OK);
3139 /* Handle undefined instructions. */
3140 info->insn_type = dis_noninsn;
3141 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3142 word, err_msg[-ret]);
3145 user_friendly_fixup (&inst);
3146 print_aarch64_insn (pc, &inst, info);
3153 /* Disallow mapping symbols ($x, $d etc) from
3154 being displayed in symbol relative addresses. */
3157 aarch64_symbol_is_valid (asymbol * sym,
3158 struct disassemble_info * info ATTRIBUTE_UNUSED)
3165 name = bfd_asymbol_name (sym);
3169 || (name[1] != 'x' && name[1] != 'd')
3170 || (name[2] != '\0' && name[2] != '.'));
3173 /* Print data bytes on INFO->STREAM. */
3176 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3178 struct disassemble_info *info,
3179 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3181 switch (info->bytes_per_chunk)
3184 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3187 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3190 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3197 /* Try to infer the code or data type from a symbol.
3198 Returns nonzero if *MAP_TYPE was set. */
3201 get_sym_code_type (struct disassemble_info *info, int n,
3202 enum map_type *map_type)
3204 elf_symbol_type *es;
3208 /* If the symbol is in a different section, ignore it. */
3209 if (info->section != NULL && info->section != info->symtab[n]->section)
3212 es = *(elf_symbol_type **)(info->symtab + n);
3213 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3215 /* If the symbol has function type then use that. */
3216 if (type == STT_FUNC)
3218 *map_type = MAP_INSN;
3222 /* Check for mapping symbols. */
3223 name = bfd_asymbol_name(info->symtab[n]);
3225 && (name[1] == 'x' || name[1] == 'd')
3226 && (name[2] == '\0' || name[2] == '.'))
3228 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3235 /* Entry-point of the AArch64 disassembler. */
3238 print_insn_aarch64 (bfd_vma pc,
3239 struct disassemble_info *info)
3241 bfd_byte buffer[INSNLEN];
3243 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3244 aarch64_operand_error *);
3245 bfd_boolean found = FALSE;
3246 unsigned int size = 4;
3248 aarch64_operand_error errors;
3250 if (info->disassembler_options)
3252 set_default_aarch64_dis_options (info);
3254 parse_aarch64_dis_options (info->disassembler_options);
3256 /* To avoid repeated parsing of these options, we remove them here. */
3257 info->disassembler_options = NULL;
3260 /* Aarch64 instructions are always little-endian */
3261 info->endian_code = BFD_ENDIAN_LITTLE;
3263 /* First check the full symtab for a mapping symbol, even if there
3264 are no usable non-mapping symbols for this address. */
3265 if (info->symtab_size != 0
3266 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3268 enum map_type type = MAP_INSN;
3273 if (pc <= last_mapping_addr)
3274 last_mapping_sym = -1;
3276 /* Start scanning at the start of the function, or wherever
3277 we finished last time. */
3278 n = info->symtab_pos + 1;
3279 if (n < last_mapping_sym)
3280 n = last_mapping_sym;
3282 /* Scan up to the location being disassembled. */
3283 for (; n < info->symtab_size; n++)
3285 addr = bfd_asymbol_value (info->symtab[n]);
3288 if (get_sym_code_type (info, n, &type))
3297 n = info->symtab_pos;
3298 if (n < last_mapping_sym)
3299 n = last_mapping_sym;
3301 /* No mapping symbol found at this address. Look backwards
3302 for a preceeding one. */
3305 if (get_sym_code_type (info, n, &type))
3314 last_mapping_sym = last_sym;
3317 /* Look a little bit ahead to see if we should print out
3318 less than four bytes of data. If there's a symbol,
3319 mapping or otherwise, after two bytes then don't
3321 if (last_type == MAP_DATA)
3323 size = 4 - (pc & 3);
3324 for (n = last_sym + 1; n < info->symtab_size; n++)
3326 addr = bfd_asymbol_value (info->symtab[n]);
3329 if (addr - pc < size)
3334 /* If the next symbol is after three bytes, we need to
3335 print only part of the data, so that we can use either
3338 size = (pc & 1) ? 1 : 2;
3342 if (last_type == MAP_DATA)
3344 /* size was set above. */
3345 info->bytes_per_chunk = size;
3346 info->display_endian = info->endian;
3347 printer = print_insn_data;
3351 info->bytes_per_chunk = size = INSNLEN;
3352 info->display_endian = info->endian_code;
3353 printer = print_insn_aarch64_word;
3356 status = (*info->read_memory_func) (pc, buffer, size, info);
3359 (*info->memory_error_func) (status, pc, info);
3363 data = bfd_get_bits (buffer, size * 8,
3364 info->display_endian == BFD_ENDIAN_BIG);
3366 (*printer) (pc, data, info, &errors);
3372 print_aarch64_disassembler_options (FILE *stream)
3374 fprintf (stream, _("\n\
3375 The following AARCH64 specific disassembler options are supported for use\n\
3376 with the -M switch (multiple options should be separated by commas):\n"));
3378 fprintf (stream, _("\n\
3379 no-aliases Don't print instruction aliases.\n"));
3381 fprintf (stream, _("\n\
3382 aliases Do print instruction aliases.\n"));
3384 fprintf (stream, _("\n\
3385 no-notes Don't print instruction notes.\n"));
3387 fprintf (stream, _("\n\
3388 notes Do print instruction notes.\n"));
3390 #ifdef DEBUG_AARCH64
3391 fprintf (stream, _("\n\
3392 debug_dump Temp switch for debug trace.\n"));
3393 #endif /* DEBUG_AARCH64 */
3395 fprintf (stream, _("\n"));